Start runs and coordinate with server
This commit is contained in:
parent
f79468c871
commit
b23fc6460f
6 changed files with 296 additions and 32 deletions
|
|
@ -1,5 +1,8 @@
|
|||
//! Coordinate performing runs across servers.
|
||||
|
||||
use std::time::Duration;
|
||||
|
||||
use time::OffsetDateTime;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
struct Server {
|
||||
|
|
@ -9,32 +12,48 @@ struct Server {
|
|||
|
||||
/// Coordinates performing runs across the registered servers: tracks
/// which server is currently active, since when, and whether it has
/// reported itself as busy.
pub struct Coordinator {
    // Registered servers, in registration order.
    servers: Vec<Server>,
    // Index into `servers` of the currently active server.
    active: usize,
    // When the currently active server became active.
    active_since: OffsetDateTime,
    // Whether the active server has been marked busy via `look_busy`.
    busy: bool,
}
|
||||
|
||||
impl Coordinator {
|
||||
    /// Create a coordinator with no registered servers.
    ///
    /// The initial `active` index is 0, so the first server registered
    /// via [`Self::register`] becomes the active one.
    pub fn new() -> Self {
        Self {
            servers: vec![],
            active: 0,
            active_since: OffsetDateTime::now_utc(),
            busy: false,
        }
    }
|
||||
|
||||
    /// Register a server under `name`.
    ///
    /// `poke` is the channel used to notify the server when it becomes
    /// relevant again (see the poke logic when advancing servers).
    pub fn register(&mut self, name: String, poke: mpsc::UnboundedSender<()>) {
        // TODO Assert that no duplicate names exist?
        self.servers.push(Server { name, poke });
    }
|
||||
|
||||
pub fn active(&self, name: &str) -> bool {
|
||||
if let Some(current) = self.servers.get(self.current) {
|
||||
name == current.name
|
||||
} else {
|
||||
false
|
||||
pub fn active(&self, name: &str) -> ActiveInfo {
|
||||
let active_server = self.servers.get(self.active);
|
||||
let active = active_server.filter(|s| s.name == name).is_some();
|
||||
ActiveInfo {
|
||||
active,
|
||||
active_since: self.active_since,
|
||||
busy: self.busy,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Mark the currently active server as busy.
    ///
    /// Only the active server itself may do this; calls naming any
    /// other server are ignored.
    pub fn look_busy(&mut self, name: &str) {
        // Check just to prevent weird shenanigans
        if !self.active(name).active {
            return;
        }

        self.busy = true;
    }
|
||||
|
||||
pub fn move_to_next_server(&mut self, name: &str) {
|
||||
// Check just to prevent weird shenanigans
|
||||
if !self.active(name).active {
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -42,8 +61,10 @@ impl Coordinator {
|
|||
// the previous check
|
||||
assert!(!self.servers.is_empty());
|
||||
|
||||
self.current += 1;
|
||||
self.current %= self.servers.len();
|
||||
self.active += 1;
|
||||
self.active %= self.servers.len();
|
||||
self.active_since = OffsetDateTime::now_utc();
|
||||
self.busy = false;
|
||||
|
||||
// When the worker seeks work and a queue is idle, the next server
|
||||
// should be queried immediately. Otherwise, we'd introduce lots of
|
||||
|
|
@ -61,8 +82,23 @@ impl Coordinator {
|
|||
// will send two requests back-to-back: The first because their ping
|
||||
// timeout ran out, and the second because they were poked. So far, I
|
||||
// haven't been able to think of an elegant solution for this.
|
||||
if self.current > 0 {
|
||||
let _ = self.servers[self.current].poke.send(());
|
||||
if self.active > 0 {
|
||||
let _ = self.servers[self.active].poke.send(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct ActiveInfo {
|
||||
pub active: bool,
|
||||
pub active_since: OffsetDateTime,
|
||||
pub busy: bool,
|
||||
}
|
||||
|
||||
impl ActiveInfo {
|
||||
pub fn in_batch(&self, batch_duration: Duration) -> bool {
|
||||
let batch_end = self.active_since + batch_duration;
|
||||
let now = OffsetDateTime::now_utc();
|
||||
now <= batch_end
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue