1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
use crate::{
fragment::{Logs, Pool},
intercom::{NetworkMsg, TransactionMsg},
metrics::{Metrics, MetricsBackend},
utils::{
async_msg::{MessageBox, MessageQueue},
task::TokioServiceInfo,
},
};
use futures::{future, TryFutureExt};
use std::{
collections::HashMap,
convert::TryInto,
io,
path::{Path, PathBuf},
};
use thiserror::Error;
use time::{macros::format_description, Duration, OffsetDateTime, Time};
use tokio::fs::{self, File};
use tokio_stream::StreamExt;
use tracing::{debug_span, span, Level};
use tracing_futures::Instrument;
/// Long-running service that owns the fragment (transaction) pool and its logs.
///
/// Constructed via [`Process::new`] and driven by [`Process::start`], which
/// consumes incoming [`TransactionMsg`]s and propagates accepted fragments
/// to the network through `network_msg_box`.
pub struct Process {
    // capacity bound for the fragment pool (see `Pool::new` in `start`)
    pool_max_entries: usize,
    // capacity bound for the fragment logs; `start` raises this to at least
    // `pool_max_entries` if configured lower
    logs_max_entries: usize,
    // outbound channel used to propagate accepted fragments to peers
    network_msg_box: MessageBox<NetworkMsg>,
}
/// Errors that can terminate the fragment process.
#[derive(Debug, Error)]
pub enum Error {
    // propagated from `Pool::insert_and_propagate_all` via `?` in the main loop
    #[error("transaction pool error")]
    Pool(#[from] crate::fragment::pool::Error),
    // raised when creating the persistent log directory or opening/rotating
    // the hourly persistent log file fails
    #[error("failed to open persistent log file")]
    PersistentLog(#[source] io::Error),
}
impl Process {
pub fn new(
pool_max_entries: usize,
logs_max_entries: usize,
network_msg_box: MessageBox<NetworkMsg>,
) -> Self {
Process {
pool_max_entries,
logs_max_entries,
network_msg_box,
}
}
pub async fn start<P: AsRef<Path>>(
self,
service_info: TokioServiceInfo,
stats_counter: Metrics,
mut input: MessageQueue<TransactionMsg>,
persistent_log_dir: Option<P>,
) -> Result<(), Error> {
async fn hourly_wakeup(enabled: bool) {
if enabled {
let now = OffsetDateTime::now_utc();
// truncate date to hour so that rotation always happens at the hour mark
let current_hour = now.replace_time(Time::from_hms(now.hour(), 0, 0).unwrap());
let next_hour = current_hour + Duration::HOUR;
tokio::time::sleep((next_hour - now).try_into().unwrap()).await
} else {
future::pending().await
}
}
async fn open_log_file(dir: &Path) -> Result<File, Error> {
let mut path: PathBuf = dir.into();
if !path.exists() {
std::fs::create_dir_all(dir).map_err(Error::PersistentLog)?;
}
let log_file_name = OffsetDateTime::now_utc()
.format(format_description!("[year]-[month]-[day]_[hour].log"))
.expect("invalid time format description");
path.push(log_file_name);
tracing::debug!("creating fragment log file `{:?}`", path);
fs::OpenOptions::new()
.append(true)
.create(true)
.read(false)
.open(path)
.map_err(Error::PersistentLog)
.await
}
if self.logs_max_entries < self.pool_max_entries {
tracing::warn!(
"Having 'log_max_entries' < 'pool_max_entries' is not recommendend. Overriding 'log_max_entries' to {}", self.pool_max_entries
);
}
let logs = Logs::new(std::cmp::max(self.logs_max_entries, self.pool_max_entries));
let mut wakeup = Box::pin(hourly_wakeup(persistent_log_dir.is_some()));
async move {
let persistent_log = match &persistent_log_dir {
None => None,
Some(dir) => {
let file = open_log_file(dir.as_ref()).await?;
Some(file)
}
};
let mut pool = Pool::new(
self.pool_max_entries,
logs,
self.network_msg_box,
persistent_log,
stats_counter.clone()
);
loop {
tokio::select! {
maybe_msg = input.next() => {
tracing::trace!("handling new fragment task item");
match maybe_msg {
None => break,
Some(msg) => match msg {
TransactionMsg::SendTransactions { origin, fragments, fail_fast, reply_handle } => {
// Note that we cannot use apply_block here, since we don't have a valid context to which to apply
// those blocks. one valid tx in a given context, could be invalid in another. for example
// fee calculations, existence utxo / account solvency.
// FIXME/TODO check that the txs are valid within themselves with basic requirements (e.g. inputs >= outputs).
// we also want to keep a basic capability to filter away repetitive queries or definitely discarded txid.
// This interface only makes sense for messages coming from arbitrary users (like transaction, certificates),
// for other message we don't want to receive them through this interface, and possibly
// put them in another pool.
let span = debug_span!("incoming_fragments");
async {
let stats_counter = stats_counter.clone();
let summary = pool
.insert_and_propagate_all(origin, fragments, fail_fast)
.await?;
stats_counter.add_tx_recv_cnt(summary.accepted.len());
reply_handle.reply_ok(summary);
Ok::<(), Error>(())
}
.instrument(span)
.await?;
}
TransactionMsg::RemoveTransactions(fragment_ids, status) => {
let span = debug_span!("remove_transactions_in_block");
async {
tracing::debug!(
"removing fragments added to block {:?}: {:?}",
status,
fragment_ids
);
pool.remove_added_to_block(fragment_ids, status);
}.instrument(span).await
}
TransactionMsg::GetLogs(reply_handle) => {
let logs = pool.logs().logs().cloned().collect();
reply_handle.reply_ok(logs);
}
TransactionMsg::GetStatuses(fragment_ids, reply_handle) => {
let mut statuses = HashMap::new();
pool.logs().logs_by_ids(fragment_ids).into_iter().for_each(
|(fragment_id, log)| {
statuses.insert(fragment_id, log.status().clone());
},
);
reply_handle.reply_ok(statuses);
}
TransactionMsg::BranchSwitch(fork_date) => {
tracing::debug!(%fork_date, "pruning logs after branch switch");
pool.prune_after_ledger_branch(fork_date);
}
TransactionMsg::SelectTransactions {
ledger,
selection_alg,
reply_handle,
soft_deadline_future,
hard_deadline_future,
} => {
let span = span!(
Level::DEBUG,
"fragment_selection",
kind = "older_first",
);
async {
let contents = pool
.select(
ledger,
selection_alg,
soft_deadline_future,
hard_deadline_future,
)
.await;
reply_handle.reply_ok(contents);
}
.instrument(span)
.await
}
}
};
tracing::trace!("item handling finished");
}
_ = &mut wakeup => {
async {
pool.close_persistent_log().await;
let dir = persistent_log_dir.as_ref().unwrap();
let file = open_log_file(dir.as_ref()).await?;
pool.set_persistent_log(file);
wakeup = Box::pin(hourly_wakeup(true));
Ok::<_, Error>(())
}
.instrument(debug_span!("persistent_log_rotation")).await?;
}
}
}
Ok(())
}
.instrument(span!(parent: service_info.span(), Level::TRACE, "process", kind = "fragment"))
.await
}
}