Switch from tracing to log crate

Joscha 2023-08-17 03:09:24 +02:00
parent 1faf42bd82
commit f12da915a9
16 changed files with 149 additions and 202 deletions
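The change boils down to swapping the macro imports (tracing's debug!/info!/warn! for their log counterparts), rewriting structured-field calls as plain format strings, and letting whichever log-compatible backend the server installs pick the messages up. A minimal sketch of the two styles, assuming env_logger as the backend (the server's actual logger setup is not part of this excerpt) and using illustrative function names:

// Sketch only: contrasts the two macro styles touched by this commit.
mod with_tracing {
    use tracing::info;

    pub fn queue_add(hash: &str, priority: i64) {
        // tracing records `hash` and `priority` as structured fields on the event.
        info!(hash, priority, "Admin added commit to queue");
    }
}

mod with_log {
    use log::info;

    pub fn queue_add(hash: &str, priority: i64) {
        // log only interpolates the values into the message string.
        info!("Admin added {hash} to queue with priority {priority}");
    }
}

fn main() {
    // Install a `log`-compatible backend once at startup (env_logger is an assumption).
    env_logger::init();
    // No tracing subscriber is installed here, so this event is silently dropped.
    with_tracing::queue_add("1faf42bd82", 1);
    // This one reaches env_logger and is printed when RUST_LOG=info is set.
    with_log::queue_add("1faf42bd82", 1);
}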

View file

@@ -3,6 +3,7 @@ use axum::{
response::{IntoResponse, Redirect},
Form,
};
use log::info;
use serde::Deserialize;
use sqlx::SqlitePool;
use time::OffsetDateTime;
@@ -46,6 +47,11 @@ pub async fn post_admin_queue_add(
.execute(&db)
.await?;
info!(
"Admin added {} to queue with priority {}",
form.hash, form.priority,
);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}
@@ -65,6 +71,8 @@ pub async fn post_admin_queue_delete(
.execute(&db)
.await?;
info!("Admin deleted {} from queue", form.hash);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}
@@ -87,6 +95,8 @@ pub async fn post_admin_queue_increase(
.execute(&db)
.await?;
info!("Admin increased queue priority of {} by one", form.hash);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}
@@ -109,6 +119,8 @@ pub async fn post_admin_queue_decrease(
.execute(&db)
.await?;
info!("Admin decreased queue priority of {} by one", form.hash);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}

View file

@@ -13,9 +13,9 @@ use axum::{
Json, TypedHeader,
};
use gix::{ObjectId, ThreadSafeRepository};
use log::{debug, info};
use sqlx::{Acquire, SqlitePool};
use time::OffsetDateTime;
use tracing::debug;
use crate::{
config::ServerConfig,
@@ -136,8 +136,10 @@ pub async fn post_api_worker_status(
Ok(name) => name,
Err(response) => return Ok(response),
};
debug!("Received status update from {name}");
if let Some(run) = request.submit_run {
info!("Received run {} for {} from {name}", run.id, run.hash);
save_work(run, &name, &request.info, &db).await?;
}
@@ -178,7 +180,6 @@ pub async fn post_api_worker_status(
(work, abort_work)
};
debug!("Received status update from {name}");
Ok(Json(ServerResponse {
run: work,
abort_run: abort_work,
@@ -208,10 +209,11 @@ pub async fn get_api_worker_repo_by_hash_tree_tar_gz(
State(repo): State<Option<Repo>>,
auth: Option<TypedHeader<Authorization<Basic>>>,
) -> somehow::Result<Response> {
let _name = match auth::authenticate(config, auth) {
let name = match auth::authenticate(config, auth) {
Ok(name) => name,
Err(response) => return Ok(response),
};
debug!("Worker {name} is downloading repo hash {}", path.hash);
let Some(repo) = repo else {
return Ok(StatusCode::NOT_FOUND.into_response());
@@ -227,10 +229,11 @@ pub async fn get_api_worker_bench_repo_by_hash_tree_tar_gz(
State(bench_repo): State<Option<BenchRepo>>,
auth: Option<TypedHeader<Authorization<Basic>>>,
) -> somehow::Result<Response> {
let _name = match auth::authenticate(config, auth) {
let name = match auth::authenticate(config, auth) {
Ok(name) => name,
Err(response) => return Ok(response),
};
debug!("Worker {name} is downloading bench repo hash {}", path.hash);
let Some(bench_repo) = bench_repo else {
return Ok(StatusCode::NOT_FOUND.into_response());

View file

@@ -12,9 +12,9 @@ use gix::{
bstr::ByteSlice, objs::tree::EntryMode, prelude::ObjectIdExt, worktree::stream::Entry,
ObjectId, ThreadSafeRepository,
};
use log::warn;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{debug, warn};
const BLOCK_SIZE: usize = 1024 * 1024;
const COMPRESSION_LEVEL: Compression = Compression::fast();
@@ -114,8 +114,6 @@ pub fn tar_and_gzip(
if let Err(e) = write_worktree(repo, id, tx.clone()) {
warn!("Error while streaming tar:\n{e:?}");
let _ = tx.blocking_send(Err(e));
} else {
debug!("Tar streamed successfully");
}
});
ReceiverStream::new(rx)

View file

@@ -1,6 +1,6 @@
mod util;
use std::{collections::HashMap, time::Instant};
use std::collections::HashMap;
use askama::Template;
use axum::{extract::State, response::IntoResponse, Json};
@@ -9,7 +9,6 @@ use futures::{StreamExt, TryStreamExt};
use serde::{Deserialize, Serialize};
use sqlx::{Acquire, SqlitePool};
use time::OffsetDateTime;
use tracing::debug;
use crate::{
config::ServerConfig,
@@ -150,12 +149,6 @@ struct GraphData {
measurements: HashMap<String, Vec<Option<f64>>>,
}
fn f(t0: &mut Instant, name: &str) {
let now = Instant::now();
debug!("{name:>20} {}", (now - *t0).as_millis());
*t0 = Instant::now();
}
pub async fn get_graph_data(
_path: PathGraphData,
State(db): State<SqlitePool>,
@@ -170,9 +163,6 @@ pub async fn get_graph_data(
// TODO Limit by date or amount
// TODO Limit to tracked commits
let mut t0 = Instant::now();
debug!("");
let mut unsorted_hashes = Vec::<String>::new();
let mut times_by_hash = HashMap::<String, i64>::new();
let mut rows = sqlx::query!(
@@ -193,8 +183,6 @@ pub async fn get_graph_data(
}
drop(rows);
f(&mut t0, "hashes, times");
let parent_child_pairs = sqlx::query!(
"\
SELECT parent, child \
@@ -209,13 +197,9 @@ pub async fn get_graph_data(
.try_collect::<Vec<_>>()
.await?;
f(&mut t0, "parent-child");
let mut hashes = util::sort_topologically(&unsorted_hashes, &parent_child_pairs);
hashes.sort_by_key(|hash| times_by_hash[hash]);
f(&mut t0, "sort");
let sorted_hash_indices = hashes
.iter()
.cloned()
@@ -223,8 +207,6 @@ pub async fn get_graph_data(
.map(|(i, hash)| (hash, i))
.collect::<HashMap<_, _>>();
f(&mut t0, "hash indices");
let mut parents = HashMap::<usize, Vec<usize>>::new();
for (parent, child) in &parent_child_pairs {
if let Some(parent_idx) = sorted_hash_indices.get(parent) {
@@ -234,24 +216,18 @@ pub async fn get_graph_data(
}
}
f(&mut t0, "parents");
// Collect times
let times = hashes
.iter()
.map(|hash| times_by_hash[hash])
.collect::<Vec<_>>();
f(&mut t0, "times");
// permutation[unsorted_index] = sorted_index
let permutation = unsorted_hashes
.iter()
.map(|hash| sorted_hash_indices[hash])
.collect::<Vec<_>>();
f(&mut t0, "permutation");
// Collect and permutate measurements
let mut measurements = HashMap::new();
for metric in form.metric {
@@ -284,9 +260,6 @@ pub async fn get_graph_data(
measurements.insert(metric, values);
}
f(&mut t0, "measurements");
debug!("");
Ok(Json(GraphData {
hashes,
parents,