Switch from tracing to log crate

This commit is contained in:
Joscha 2023-08-17 03:09:24 +02:00
parent 1faf42bd82
commit f12da915a9
16 changed files with 149 additions and 202 deletions

View file

@@ -4,37 +4,13 @@ mod fetch;
mod queue;
mod repo;
use tracing::{debug_span, error, warn_span, Instrument};
use super::{Repo, Server};
async fn recurring_task(state: &Server, repo: Repo) {
fetch::update(state.config, repo.clone())
.instrument(debug_span!("fetch refs"))
.await;
async {
if let Err(e) = repo::update(&state.db, repo).await {
error!("Error updating repo:\n{e:?}");
};
}
.instrument(debug_span!("update repo"))
.await;
async {
if let Err(e) = queue::update(&state.db).await {
error!("Error updating queue:\n{e:?}");
};
}
.instrument(debug_span!("update queue"))
.await;
}
pub(super) async fn run(server: Server, repo: Repo) {
loop {
recurring_task(&server, repo.clone())
.instrument(warn_span!("update"))
.await;
fetch::update(server.config, repo.clone()).await;
repo::update(&server.db, repo.clone()).await;
queue::update(&server.db).await;
tokio::time::sleep(server.config.repo_update).await;
}

View file

@@ -2,8 +2,8 @@
use std::process::Command;
use gix::bstr::ByteVec;
use tracing::{info, warn};
use gix::bstr::ByteSlice;
use log::{info, warn};
use crate::{config::ServerConfig, server::Repo, somehow};
@@ -25,9 +25,15 @@ fn fetch(repo: Repo, url: &str, refspecs: &[String]) -> somehow::Result<()> {
let output = command.output()?;
if output.status.success() {
} else {
warn!(exitcode = %output.status, "'git fetch' failed");
warn!(output = "stdout", "{}", output.stdout.into_string_lossy());
warn!(output = "stderr", "{}", output.stderr.into_string_lossy());
warn!(
"Error fetching refs:\n\
{command:?} exited with code {}\n\
STDOUT:\n{}\n\
STDERR:\n{}",
output.status,
output.stdout.to_str_lossy(),
output.stderr.to_str_lossy()
);
}
Ok(())
@@ -38,7 +44,7 @@ async fn inner(repo: Repo, url: &'static str, refspecs: &'static [String]) -> so
Ok(())
}
pub async fn update(config: &'static ServerConfig, repo: Repo) {
pub(super) async fn update(config: &'static ServerConfig, repo: Repo) {
if let Some(url) = &config.repo_fetch_url {
if let Err(e) = inner(repo, url, &config.repo_fetch_refspecs).await {
warn!("Error fetching refs:\n{e:?}");

View file

@@ -1,10 +1,10 @@
use log::{debug, info, warn};
use sqlx::{Acquire, SqlitePool};
use time::OffsetDateTime;
use tracing::debug;
use crate::somehow;
pub async fn update(db: &SqlitePool) -> somehow::Result<()> {
async fn inner(db: &SqlitePool) -> somehow::Result<()> {
debug!("Updating queue");
let mut tx = db.begin().await?;
let conn = tx.acquire().await?;
@@ -13,20 +13,22 @@ pub async fn update(db: &SqlitePool) -> somehow::Result<()> {
let new = sqlx::query!("SELECT hash FROM commits WHERE new AND reachable = 2")
.fetch_all(&mut *conn)
.await?;
let new_len = new.len();
// Insert them into the queue
for row in new {
let date = OffsetDateTime::now_utc();
sqlx::query!(
let result = sqlx::query!(
"INSERT OR IGNORE INTO queue (hash, date) VALUES (?, ?)",
row.hash,
date,
)
.execute(&mut *conn)
.await?;
if result.rows_affected() > 0 {
info!("Added new commit {} to the queue", row.hash);
}
}
debug!("Added {new_len} commits to the queue");
// Mark all commits as old
sqlx::query!("UPDATE commits SET new = false")
@@ -34,6 +36,11 @@ pub async fn update(db: &SqlitePool) -> somehow::Result<()> {
.await?;
tx.commit().await?;
debug!("Updated queue");
Ok(())
}
pub(super) async fn update(db: &SqlitePool) {
if let Err(e) = inner(db).await {
warn!("Error updating queue:\n{e:?}");
}
}

View file

@@ -4,9 +4,9 @@ use std::collections::HashSet;
use futures::TryStreamExt;
use gix::{date::Time, objs::Kind, prelude::ObjectIdExt, refs::Reference, ObjectId, Repository};
use log::{debug, info, warn};
use sqlx::{Acquire, SqliteConnection, SqlitePool};
use time::{OffsetDateTime, UtcOffset};
use tracing::{debug, info};
use crate::{
server::{util, Repo},
@@ -111,11 +111,11 @@ async fn insert_new_commits(
.execute(&mut *conn)
.await?;
// So the user has something to look at while importing big repos
if (i + 1) % 100000 == 0 {
debug!("Inserted {} commits so far", i + 1);
info!("(1/2) Inserting commits: {}/{}", i + 1, new.len());
}
}
debug!("Inserted {} commits in total", new.len());
Ok(())
}
@@ -140,11 +140,11 @@ async fn insert_new_commit_links(
.await?;
}
// So the user has something to look at while importing big repos
if (i + 1) % 100000 == 0 {
debug!("Inserted {} commits' links so far", i + 1);
info!("(2/2) Inserting links: {}/{}", i + 1, new.len());
}
}
debug!("Inserted {} commits' links in total", new.len());
Ok(())
}
@@ -234,8 +234,8 @@ async fn update_commit_tracked_status(conn: &mut SqliteConnection) -> somehow::R
Ok(())
}
pub(super) async fn update(db: &SqlitePool, repo: Repo) -> somehow::Result<()> {
debug!("Updating repo");
pub async fn inner(db: &SqlitePool, repo: Repo) -> somehow::Result<()> {
debug!("Updating repo data");
let thread_local_repo = repo.0.to_thread_local();
let mut tx = db.begin().await?;
let conn = tx.acquire().await?;
@@ -254,7 +254,11 @@ pub(super) async fn update(db: &SqlitePool, repo: Repo) -> somehow::Result<()> {
get_all_refs_and_new_commits_from_repo(&repo.0.to_thread_local(), &old)
})
.await??;
debug!("Found {} new commits in repo", new.len());
if new.is_empty() {
debug!("Found no new commits in repo");
} else {
info!("Found {} new commits in repo", new.len());
}
// Defer foreign key checks until the end of the transaction to improve
// insert performance.
@ -282,6 +286,11 @@ pub(super) async fn update(db: &SqlitePool, repo: Repo) -> somehow::Result<()> {
if repo_is_new {
info!("Initialized new repo");
}
debug!("Updated repo");
Ok(())
}
pub(super) async fn update(db: &SqlitePool, repo: Repo) {
if let Err(e) = inner(db, repo).await {
warn!("Error updating repo data:\n{e:?}");
}
}

View file

@@ -3,6 +3,7 @@ use axum::{
response::{IntoResponse, Redirect},
Form,
};
use log::info;
use serde::Deserialize;
use sqlx::SqlitePool;
use time::OffsetDateTime;
@@ -46,6 +47,11 @@ pub async fn post_admin_queue_add(
.execute(&db)
.await?;
info!(
"Admin added {} to queue with priority {}",
form.hash, form.priority,
);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}
@@ -65,6 +71,8 @@ pub async fn post_admin_queue_delete(
.execute(&db)
.await?;
info!("Admin deleted {} from queue", form.hash);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}
@@ -87,6 +95,8 @@ pub async fn post_admin_queue_increase(
.execute(&db)
.await?;
info!("Admin increased queue priority of {} by one", form.hash);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}
@@ -109,6 +119,8 @@ pub async fn post_admin_queue_decrease(
.execute(&db)
.await?;
info!("Admin decreased queue priority of {} by one", form.hash);
let link = Base::link_with_config(config, PathQueue {});
Ok(Redirect::to(&format!("{link}")))
}

View file

@@ -13,9 +13,9 @@ use axum::{
Json, TypedHeader,
};
use gix::{ObjectId, ThreadSafeRepository};
use log::{debug, info};
use sqlx::{Acquire, SqlitePool};
use time::OffsetDateTime;
use tracing::debug;
use crate::{
config::ServerConfig,
@@ -136,8 +136,10 @@ pub async fn post_api_worker_status(
Ok(name) => name,
Err(response) => return Ok(response),
};
debug!("Received status update from {name}");
if let Some(run) = request.submit_run {
info!("Received run {} for {} from {name}", run.id, run.hash);
save_work(run, &name, &request.info, &db).await?;
}
@@ -178,7 +180,6 @@ pub async fn post_api_worker_status(
(work, abort_work)
};
debug!("Received status update from {name}");
Ok(Json(ServerResponse {
run: work,
abort_run: abort_work,
@@ -208,10 +209,11 @@ pub async fn get_api_worker_repo_by_hash_tree_tar_gz(
State(repo): State<Option<Repo>>,
auth: Option<TypedHeader<Authorization<Basic>>>,
) -> somehow::Result<Response> {
let _name = match auth::authenticate(config, auth) {
let name = match auth::authenticate(config, auth) {
Ok(name) => name,
Err(response) => return Ok(response),
};
debug!("Worker {name} is downloading repo hash {}", path.hash);
let Some(repo) = repo else {
return Ok(StatusCode::NOT_FOUND.into_response());
@@ -227,10 +229,11 @@ pub async fn get_api_worker_bench_repo_by_hash_tree_tar_gz(
State(bench_repo): State<Option<BenchRepo>>,
auth: Option<TypedHeader<Authorization<Basic>>>,
) -> somehow::Result<Response> {
let _name = match auth::authenticate(config, auth) {
let name = match auth::authenticate(config, auth) {
Ok(name) => name,
Err(response) => return Ok(response),
};
debug!("Worker {name} is downloading bench repo hash {}", path.hash);
let Some(bench_repo) = bench_repo else {
return Ok(StatusCode::NOT_FOUND.into_response());

View file

@@ -12,9 +12,9 @@ use gix::{
bstr::ByteSlice, objs::tree::EntryMode, prelude::ObjectIdExt, worktree::stream::Entry,
ObjectId, ThreadSafeRepository,
};
use log::warn;
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use tracing::{debug, warn};
const BLOCK_SIZE: usize = 1024 * 1024;
const COMPRESSION_LEVEL: Compression = Compression::fast();
@@ -114,8 +114,6 @@ pub fn tar_and_gzip(
if let Err(e) = write_worktree(repo, id, tx.clone()) {
warn!("Error while streaming tar:\n{e:?}");
let _ = tx.blocking_send(Err(e));
} else {
debug!("Tar streamed successfully");
}
});
ReceiverStream::new(rx)

View file

@@ -1,6 +1,6 @@
mod util;
use std::{collections::HashMap, time::Instant};
use std::collections::HashMap;
use askama::Template;
use axum::{extract::State, response::IntoResponse, Json};
@@ -9,7 +9,6 @@ use futures::{StreamExt, TryStreamExt};
use serde::{Deserialize, Serialize};
use sqlx::{Acquire, SqlitePool};
use time::OffsetDateTime;
use tracing::debug;
use crate::{
config::ServerConfig,
@@ -150,12 +149,6 @@ struct GraphData {
measurements: HashMap<String, Vec<Option<f64>>>,
}
fn f(t0: &mut Instant, name: &str) {
let now = Instant::now();
debug!("{name:>20} {}", (now - *t0).as_millis());
*t0 = Instant::now();
}
pub async fn get_graph_data(
_path: PathGraphData,
State(db): State<SqlitePool>,
@@ -170,9 +163,6 @@ pub async fn get_graph_data(
// TODO Limit by date or amount
// TODO Limit to tracked commits
let mut t0 = Instant::now();
debug!("");
let mut unsorted_hashes = Vec::<String>::new();
let mut times_by_hash = HashMap::<String, i64>::new();
let mut rows = sqlx::query!(
@@ -193,8 +183,6 @@
}
drop(rows);
f(&mut t0, "hashes, times");
let parent_child_pairs = sqlx::query!(
"\
SELECT parent, child \
@@ -209,13 +197,9 @@
.try_collect::<Vec<_>>()
.await?;
f(&mut t0, "parent-child");
let mut hashes = util::sort_topologically(&unsorted_hashes, &parent_child_pairs);
hashes.sort_by_key(|hash| times_by_hash[hash]);
f(&mut t0, "sort");
let sorted_hash_indices = hashes
.iter()
.cloned()
@@ -223,8 +207,6 @@
.map(|(i, hash)| (hash, i))
.collect::<HashMap<_, _>>();
f(&mut t0, "hash indices");
let mut parents = HashMap::<usize, Vec<usize>>::new();
for (parent, child) in &parent_child_pairs {
if let Some(parent_idx) = sorted_hash_indices.get(parent) {
@@ -234,24 +216,18 @@
}
}
f(&mut t0, "parents");
// Collect times
let times = hashes
.iter()
.map(|hash| times_by_hash[hash])
.collect::<Vec<_>>();
f(&mut t0, "times");
// permutation[unsorted_index] = sorted_index
let permutation = unsorted_hashes
.iter()
.map(|hash| sorted_hash_indices[hash])
.collect::<Vec<_>>();
f(&mut t0, "permutation");
// Collect and permutate measurements
let mut measurements = HashMap::new();
for metric in form.metric {
@@ -284,9 +260,6 @@
measurements.insert(metric, values);
}
f(&mut t0, "measurements");
debug!("");
Ok(Json(GraphData {
hashes,
parents,