Implement placeholder server responses

This commit is contained in:
Joscha 2023-10-22 16:29:20 +02:00
parent 3dc54738fa
commit f77ed130e1
10 changed files with 85 additions and 357 deletions

View file

@ -26,7 +26,7 @@ use self::{
},
pages::{
commit::get_commit_by_hash,
graph::{get_graph, get_graph_data, get_graph_metrics},
graph::{get_graph, get_graph_commits, get_graph_measurements, get_graph_metrics},
index::get_index,
queue::{get_queue, get_queue_delete, get_queue_inner},
run::get_run_by_id,
@ -48,7 +48,8 @@ pub async fn run(server: Server) -> somehow::Result<()> {
.typed_get(get_api_worker_repo_by_hash_tree_tar_gz)
.typed_get(get_commit_by_hash)
.typed_get(get_graph)
.typed_get(get_graph_data)
.typed_get(get_graph_commits)
.typed_get(get_graph_measurements)
.typed_get(get_graph_metrics)
.typed_get(get_index)
.typed_get(get_queue)

View file

@ -1,20 +1,16 @@
mod util;
use std::collections::HashMap;
use askama::Template;
use axum::{extract::State, response::IntoResponse, Json};
use axum_extra::extract::Query;
use futures::{StreamExt, TryStreamExt};
use serde::{Deserialize, Serialize};
use sqlx::{Acquire, SqlitePool};
use time::OffsetDateTime;
use sqlx::SqlitePool;
use crate::{
config::ServerConfig,
server::web::{
base::{Base, Link, Tab},
paths::{PathGraph, PathGraphData, PathGraphMetrics},
paths::{PathGraph, PathGraphCommits, PathGraphMeasurements, PathGraphMetrics},
r#static::{GRAPH_JS, UPLOT_CSS},
},
somehow,
@ -41,7 +37,9 @@ pub async fn get_graph(
}
/// JSON payload returned by the `/graph/metrics` endpoint.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct MetricsResponse {
    // Identifier of the measurement data snapshot. Currently always 0 — the
    // handler fills this with a placeholder (see the TODO in
    // `get_graph_metrics`).
    data_id: i64,
    // Metric names fetched from the database.
    metrics: Vec<String>,
}
@ -54,139 +52,59 @@ pub async fn get_graph_metrics(
.fetch_all(&db)
.await?;
Ok(Json(MetricsResponse { metrics }))
Ok(Json(MetricsResponse {
data_id: 0, // TODO Implement
metrics,
}))
}
/// JSON payload returned by the `/graph/commits` endpoint.
///
/// NOTE(review): the `*_by_hash` field names suggest columnar data with one
/// entry per commit and identical indexing across all vectors — confirm once
/// the handler is actually implemented (all vectors are currently empty
/// placeholders).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct CommitsResponse {
    graph_id: i64,
    hash_by_hash: Vec<String>,
    author_by_hash: Vec<String>,
    committer_date_by_hash: Vec<i64>,
    message_by_hash: Vec<String>,
    parents_by_hash: Vec<Vec<String>>,
}
/// Handler for the `/graph/commits` route.
///
/// Placeholder: returns an empty [`CommitsResponse`] until commit data is
/// actually queried from the database (TODO).
pub async fn get_graph_commits(
    _path: PathGraphCommits,
    State(db): State<SqlitePool>,
) -> somehow::Result<impl IntoResponse> {
    // TODO Implement: load commit metadata from the db instead of stubbing.
    let response = CommitsResponse {
        graph_id: 0,
        hash_by_hash: Vec::new(),
        author_by_hash: Vec::new(),
        committer_date_by_hash: Vec::new(),
        message_by_hash: Vec::new(),
        parents_by_hash: Vec::new(),
    };
    Ok(Json(response))
}
#[derive(Deserialize)]
pub struct QueryGraphData {
pub struct QueryGraphMeasurements {
#[serde(default)]
metric: Vec<String>,
}
#[derive(Serialize)]
struct GraphData {
hashes: Vec<String>,
parents: HashMap<usize, Vec<usize>>,
times: Vec<i64>,
// TODO f32 for smaller transmission size?
measurements: HashMap<String, Vec<Option<f64>>>,
#[serde(rename_all = "camelCase")]
struct MeasurementsResponse {
graph_id: i64,
data_id: i64,
measurements: HashMap<String, Vec<f64>>,
}
pub async fn get_graph_data(
_path: PathGraphData,
pub async fn get_graph_measurements(
_path: PathGraphMeasurements,
State(db): State<SqlitePool>,
Query(form): Query<QueryGraphData>,
Query(form): Query<QueryGraphMeasurements>,
) -> somehow::Result<impl IntoResponse> {
let mut tx = db.begin().await?;
let conn = tx.acquire().await?;
// The SQL queries that return one result per commit *must* return the same
// amount of rows in the same order!
// TODO Limit by date or amount
let mut unsorted_hashes = Vec::<String>::new();
let mut times_by_hash = HashMap::<String, i64>::new();
let mut rows = sqlx::query!(
"\
SELECT \
hash, \
committer_date AS \"time: OffsetDateTime\" \
FROM commits \
WHERE reachable = 2 \
ORDER BY hash ASC \
"
)
.fetch(&mut *conn);
while let Some(row) = rows.next().await {
let row = row?;
unsorted_hashes.push(row.hash.clone());
times_by_hash.insert(row.hash, row.time.unix_timestamp());
}
drop(rows);
let parent_child_pairs = sqlx::query!(
"\
SELECT parent, child \
FROM commit_links \
JOIN commits ON hash = parent \
WHERE reachable = 2 \
ORDER BY parent ASC, child ASC \
"
)
.fetch(&mut *conn)
.map_ok(|r| (r.parent, r.child))
.try_collect::<Vec<_>>()
.await?;
let mut hashes = util::sort_topologically(&unsorted_hashes, &parent_child_pairs);
hashes.sort_by_key(|hash| times_by_hash[hash]);
let sorted_hash_indices = hashes
.iter()
.cloned()
.enumerate()
.map(|(i, hash)| (hash, i))
.collect::<HashMap<_, _>>();
let mut parents = HashMap::<usize, Vec<usize>>::new();
for (parent, child) in &parent_child_pairs {
if let Some(parent_idx) = sorted_hash_indices.get(parent) {
if let Some(child_idx) = sorted_hash_indices.get(child) {
parents.entry(*parent_idx).or_default().push(*child_idx);
}
}
}
// Collect times
let times = hashes
.iter()
.map(|hash| times_by_hash[hash])
.collect::<Vec<_>>();
// permutation[unsorted_index] = sorted_index
let permutation = unsorted_hashes
.iter()
.map(|hash| sorted_hash_indices[hash])
.collect::<Vec<_>>();
// Collect and permutate measurements
let mut measurements = HashMap::new();
for metric in form.metric {
let mut values = vec![None; hashes.len()];
let mut rows = sqlx::query_scalar!(
"\
WITH \
measurements AS ( \
SELECT hash, value, MAX(start) \
FROM runs \
JOIN run_measurements USING (id) \
WHERE metric = ? \
GROUP BY hash \
) \
SELECT value \
FROM commits \
LEFT JOIN measurements USING (hash) \
WHERE reachable = 2 \
ORDER BY hash ASC \
",
metric,
)
.fetch(&mut *conn)
.enumerate();
while let Some((i, value)) = rows.next().await {
values[permutation[i]] = value?;
}
drop(rows);
measurements.insert(metric, values);
}
Ok(Json(GraphData {
hashes,
parents,
times,
measurements,
Ok(Json(MeasurementsResponse {
graph_id: 0, // TODO Implement
data_id: 0, // TODO Implement
measurements: HashMap::new(), // TODO Implement
}))
}

View file

@ -1,89 +0,0 @@
use std::collections::{HashMap, HashSet};
/// Sort commits topologically such that parents come before their children.
///
/// Assumes that `parent_child_pairs` contains no duplicates and is in the
/// desired order (see below for more info on the order).
///
/// The algorithm used is a version of [Kahn's algorithm][0] that starts at the
/// nodes with no parents. It uses a stack for the set of parentless nodes,
/// meaning the resulting commit order is depth-first-y, not breadth-first-y.
/// For example, this commit graph (where children are ordered top to bottom)
/// results in the order `A, B, C, D, E, F` and not an interleaved order like
/// `A, B, D, C, E, F` (which a queue would produce):
///
/// ```text
/// A - B - C
/// \ \
/// D - E - F
/// ```
///
/// When a node is visited and added to the list of sorted nodes, it is removed
/// as parent from all its children. Those who had no other parents are added to
/// the stack in reverse order. In the final list, the children appear in the
/// order they appeared in the parent child pairs, if possible. This means that
/// the order of the commits and of the pairs matters and should probably be
/// deterministic.
///
/// [0]: https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
/// Sort commits topologically so that every parent precedes its children.
///
/// Assumes `parent_child_pairs` contains no duplicates and is already in the
/// desired order; pairs that mention commits not present in `commits` are
/// silently ignored.
///
/// A stack-based variant of [Kahn's algorithm][0] is used, seeded with the
/// parentless commits. Using a stack (rather than a queue) yields a
/// depth-first-ish order: for the graph below (children ordered top to
/// bottom) the result is `A, B, C, D, E, F`, not the interleaved
/// `A, B, D, C, E, F` a queue would produce.
///
/// ```text
/// A - B - C
///  \    \
///   D - E - F
/// ```
///
/// Each emitted node is detached from its children; children left without
/// parents are pushed in reverse so they later pop in their original pair
/// order. Consequently both the commit order and the pair order influence the
/// result and should be deterministic.
///
/// [0]: https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm
pub fn sort_topologically(
    commits: &[String],
    parent_child_pairs: &[(String, String)],
) -> Vec<String> {
    // Both maps carry an entry for every known commit up front, so the
    // `get_mut`/`remove`/index operations below are guaranteed to succeed.
    let mut children_of: HashMap<String, Vec<String>> = commits
        .iter()
        .map(|hash| (hash.clone(), Vec::new()))
        .collect();
    let mut parents_of: HashMap<String, HashSet<String>> = commits
        .iter()
        .map(|hash| (hash.clone(), HashSet::new()))
        .collect();

    for (parent, child) in parent_child_pairs {
        // Drop edges that touch commits outside the given commit set.
        if children_of.contains_key(parent) && children_of.contains_key(child) {
            children_of.get_mut(parent).unwrap().push(child.clone());
            parents_of.get_mut(child).unwrap().insert(parent.clone());
        }
    }

    // Seed the stack with the parentless commits, iterating in reverse so the
    // original commit order is restored when popping. The linear `contains`
    // scan is quadratic in theory but fine in practice: the number of
    // parentless commits is usually small.
    let mut stack: Vec<String> = Vec::new();
    for commit in commits.iter().rev() {
        if parents_of[commit].is_empty() && !stack.contains(commit) {
            stack.push(commit.clone());
        }
    }

    let mut sorted: Vec<String> = Vec::new();
    while let Some(hash) = stack.pop() {
        // Detach `hash` from each child; walk the children in reverse so the
        // ones that become parentless pop off the stack in pair order.
        for child in children_of.remove(&hash).unwrap().into_iter().rev() {
            let remaining = parents_of.get_mut(&child).unwrap();
            remaining.remove(&hash);
            if remaining.is_empty() {
                stack.push(child);
            }
        }
        sorted.push(hash);
    }

    // Sanity checks: the graph must have been fully consumed, which also
    // implies it was acyclic.
    assert!(children_of.is_empty());
    assert!(parents_of.values().all(|parents| parents.is_empty()));
    assert!(stack.is_empty());
    assert_eq!(commits.len(), sorted.len());
    sorted
}

View file

@ -18,8 +18,12 @@ pub struct PathGraph {}
pub struct PathGraphMetrics {}
#[derive(Deserialize, TypedPath)]
#[typed_path("/graph/data")]
pub struct PathGraphData {}
#[typed_path("/graph/commits")]
pub struct PathGraphCommits {}
/// Typed route marker for the `/graph/measurements` endpoint.
#[derive(Deserialize, TypedPath)]
#[typed_path("/graph/measurements")]
pub struct PathGraphMeasurements {}
#[derive(Deserialize, TypedPath)]
#[typed_path("/queue/")]