diff --git a/src/compute-client/build.rs b/src/compute-client/build.rs
index 6e1a3f9b935ac..0b8742504b28d 100644
--- a/src/compute-client/build.rs
+++ b/src/compute-client/build.rs
@@ -13,7 +13,9 @@ fn main() {
     env::set_var("PROTOC", protobuf_src::protoc());
 
     let mut config = prost_build::Config::new();
-    config.btree_map(["."]);
+    config
+        .btree_map(["."])
+        .type_attribute(".", "#[allow(missing_docs)]");
 
     tonic_build::configure()
         // Enabling `emit_rerun_if_changed` will rerun the build script when
diff --git a/src/compute-client/src/controller.rs b/src/compute-client/src/controller.rs
index e71b089229ec4..fae53e31206fd 100644
--- a/src/compute-client/src/controller.rs
+++ b/src/compute-client/src/controller.rs
@@ -82,12 +82,18 @@ pub enum ComputeControllerResponse {
     /// See [`ComputeResponse::SubscribeResponse`].
     SubscribeResponse(GlobalId, SubscribeResponse),
     /// See [`ComputeResponse::FrontierUpper`]
-    FrontierUpper { id: GlobalId, upper: Antichain },
+    FrontierUpper {
+        /// TODO(#25239): Add documentation.
+        id: GlobalId,
+        /// TODO(#25239): Add documentation.
+        upper: Antichain,
+    },
 }
 
 /// Replica configuration
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct ComputeReplicaConfig {
+    /// TODO(#25239): Add documentation.
     pub logging: ComputeReplicaLogging,
     /// The amount of effort to be spent on arrangement compaction during idle times.
     ///
@@ -188,6 +194,7 @@ impl ComputeController {
         }
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn instance_exists(&self, id: ComputeInstanceId) -> bool {
         self.instances.contains_key(&id)
     }
@@ -223,6 +230,7 @@ impl ComputeController {
         Ok(collection)
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn find_collection(
         &self,
         collection_id: GlobalId,
@@ -256,18 +264,22 @@ impl ComputeController {
             .collection_reverse_dependencies(id))
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn set_default_idle_arrangement_merge_effort(&mut self, value: u32) {
         self.default_idle_arrangement_merge_effort = value;
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn set_default_arrangement_exert_proportionality(&mut self, value: u32) {
         self.default_arrangement_exert_proportionality = value;
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn enable_aggressive_readhold_downgrades(&self) -> bool {
         self.enable_aggressive_readhold_downgrades
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn set_enable_aggressive_readhold_downgrades(&mut self, value: bool) {
         self.enable_aggressive_readhold_downgrades = value;
     }
@@ -443,6 +455,7 @@ pub struct ActiveComputeController<'a, T> {
 }
 
 impl ActiveComputeController<'_, T> {
+    /// TODO(#25239): Add documentation.
     pub fn instance_exists(&self, id: ComputeInstanceId) -> bool {
         self.compute.instance_exists(id)
     }
@@ -822,6 +835,7 @@ impl CollectionState {
         }
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn new_log_collection() -> Self {
         let since = Antichain::from_elem(Timestamp::minimum());
         let mut state = Self::new(since, Vec::new(), Vec::new());
diff --git a/src/compute-client/src/controller/error.rs b/src/compute-client/src/controller/error.rs
index bd034bf66c01c..8258933120099 100644
--- a/src/compute-client/src/controller/error.rs
+++ b/src/compute-client/src/controller/error.rs
@@ -41,8 +41,10 @@ pub struct CollectionMissing(pub GlobalId);
 /// Errors arising during compute collection lookup.
 #[derive(Error, Debug)]
 pub enum CollectionLookupError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("collection does not exist: {0}")]
     CollectionMissing(GlobalId),
 }
@@ -62,10 +64,13 @@ impl From for CollectionLookupError {
 /// Errors arising during compute replica creation.
 #[derive(Error, Debug)]
 pub enum ReplicaCreationError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("replica exists already: {0}")]
     ReplicaExists(ReplicaId),
+    /// TODO(#25239): Add documentation.
     #[error("collection does not exist: {0}")]
     CollectionMissing(GlobalId),
 }
@@ -91,8 +96,10 @@ impl From for ReplicaCreationError {
 /// Errors arising during compute replica removal.
 #[derive(Error, Debug)]
 pub enum ReplicaDropError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("replica does not exist: {0}")]
     ReplicaMissing(ReplicaId),
 }
@@ -112,12 +119,16 @@ impl From for ReplicaDropError {
 /// Errors arising during dataflow creation.
 #[derive(Error, Debug)]
 pub enum DataflowCreationError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("collection does not exist: {0}")]
     CollectionMissing(GlobalId),
+    /// TODO(#25239): Add documentation.
     #[error("dataflow definition lacks an as_of value")]
     MissingAsOf,
+    /// TODO(#25239): Add documentation.
     #[error("dataflow has an as_of not beyond the since of collection: {0}")]
     SinceViolation(GlobalId),
 }
@@ -142,12 +153,16 @@ impl From for DataflowCreationError {
 /// Errors arising during peek processing.
 #[derive(Error, Debug)]
 pub enum PeekError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("collection does not exist: {0}")]
     CollectionMissing(GlobalId),
+    /// TODO(#25239): Add documentation.
     #[error("replica does not exist: {0}")]
     ReplicaMissing(ReplicaId),
+    /// TODO(#25239): Add documentation.
     #[error("peek timestamp is not beyond the since of collection: {0}")]
     SinceViolation(GlobalId),
 }
@@ -172,8 +187,10 @@ impl From for PeekError {
 /// Errors arising during collection updates.
 #[derive(Error, Debug)]
 pub enum CollectionUpdateError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("collection does not exist: {0}")]
     CollectionMissing(GlobalId),
 }
@@ -193,10 +210,13 @@ impl From for CollectionUpdateError {
 /// Errors arising during collection read policy assignment.
 #[derive(Error, Debug)]
 pub enum ReadPolicyError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("collection does not exist: {0}")]
     CollectionMissing(GlobalId),
+    /// TODO(#25239): Add documentation.
     #[error("collection is write-only: {0}")]
     WriteOnlyCollection(GlobalId),
 }
@@ -217,15 +237,19 @@ impl From for ReadPolicyError {
     }
 }
 
-// Errors arising during subscribe target assignment.
+/// Errors arising during subscribe target assignment.
 #[derive(Error, Debug)]
 pub enum SubscribeTargetError {
+    /// TODO(#25239): Add documentation.
     #[error("instance does not exist: {0}")]
     InstanceMissing(ComputeInstanceId),
+    /// TODO(#25239): Add documentation.
     #[error("subscribe does not exist: {0}")]
     SubscribeMissing(GlobalId),
+    /// TODO(#25239): Add documentation.
     #[error("replica does not exist: {0}")]
     ReplicaMissing(ReplicaId),
+    /// TODO(#25239): Add documentation.
     #[error("subscribe has already produced output")]
     SubscribeAlreadyStarted,
 }
@@ -250,6 +274,7 @@ impl From for SubscribeTargetError {
 /// Errors arising during orphan removal.
 #[derive(Error, Debug)]
 pub enum RemoveOrphansError {
+    /// TODO(#25239): Add documentation.
     #[error("orchestrator error: {0}")]
     OrchestratorError(anyhow::Error),
 }
diff --git a/src/compute-client/src/lib.rs b/src/compute-client/src/lib.rs
index 9d99dadc62e57..67e58ba972688 100644
--- a/src/compute-client/src/lib.rs
+++ b/src/compute-client/src/lib.rs
@@ -7,9 +7,7 @@
 // the Business Source License, use of this software will be governed
 // by the Apache License, Version 2.0.
 
-// This appears to be defective at the moment, with false positives
-// for each variant of the `Command` enum, each of which are documented.
-// #![warn(missing_docs)]
+#![warn(missing_docs)]
 
 //! The public API for the compute layer.
diff --git a/src/compute-client/src/logging.rs b/src/compute-client/src/logging.rs
index 738b3f2a4104d..8b25e57ab657e 100644
--- a/src/compute-client/src/logging.rs
+++ b/src/compute-client/src/logging.rs
@@ -101,12 +101,16 @@ impl ProtoMapEntry for ProtoIndexLog {
     }
 }
 
+/// TODO(#25239): Add documentation.
 #[derive(
     Arbitrary, Hash, Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Copy, Serialize, Deserialize,
 )]
 pub enum LogVariant {
+    /// TODO(#25239): Add documentation.
     Timely(TimelyLog),
+    /// TODO(#25239): Add documentation.
     Differential(DifferentialLog),
+    /// TODO(#25239): Add documentation.
     Compute(ComputeLog),
 }
 
@@ -151,20 +155,32 @@ impl RustType for LogVariant {
     }
 }
 
+/// TODO(#25239): Add documentation.
 #[derive(
     Arbitrary, Hash, Eq, Ord, PartialEq, PartialOrd, Debug, Clone, Copy, Serialize, Deserialize,
 )]
 pub enum TimelyLog {
+    /// TODO(#25239): Add documentation.
     Operates,
+    /// TODO(#25239): Add documentation.
     Channels,
+    /// TODO(#25239): Add documentation.
     Elapsed,
+    /// TODO(#25239): Add documentation.
     Histogram,
+    /// TODO(#25239): Add documentation.
     Addresses,
+    /// TODO(#25239): Add documentation.
     Parks,
+    /// TODO(#25239): Add documentation.
     MessagesSent,
+    /// TODO(#25239): Add documentation.
     MessagesReceived,
+    /// TODO(#25239): Add documentation.
     Reachability,
+    /// TODO(#25239): Add documentation.
     BatchesSent,
+    /// TODO(#25239): Add documentation.
     BatchesReceived,
 }
 
@@ -207,16 +223,24 @@ impl RustType for TimelyLog {
     }
 }
 
+/// TODO(#25239): Add documentation.
 #[derive(
     Arbitrary, Hash, Eq, Ord, PartialEq, PartialOrd, Debug, Clone, Copy, Serialize, Deserialize,
 )]
 pub enum DifferentialLog {
+    /// TODO(#25239): Add documentation.
     ArrangementBatches,
+    /// TODO(#25239): Add documentation.
     ArrangementRecords,
+    /// TODO(#25239): Add documentation.
     Sharing,
+    /// TODO(#25239): Add documentation.
     BatcherRecords,
+    /// TODO(#25239): Add documentation.
     BatcherSize,
+    /// TODO(#25239): Add documentation.
     BatcherCapacity,
+    /// TODO(#25239): Add documentation.
     BatcherAllocations,
 }
 
@@ -253,20 +277,32 @@ impl RustType for DifferentialLog {
     }
 }
 
+/// TODO(#25239): Add documentation.
 #[derive(
     Arbitrary, Hash, Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Copy, Serialize, Deserialize,
 )]
 pub enum ComputeLog {
+    /// TODO(#25239): Add documentation.
     DataflowCurrent,
+    /// TODO(#25239): Add documentation.
     FrontierCurrent,
+    /// TODO(#25239): Add documentation.
     PeekCurrent,
+    /// TODO(#25239): Add documentation.
     PeekDuration,
+    /// TODO(#25239): Add documentation.
     FrontierDelay,
+    /// TODO(#25239): Add documentation.
     ImportFrontierCurrent,
+    /// TODO(#25239): Add documentation.
     ArrangementHeapSize,
+    /// TODO(#25239): Add documentation.
     ArrangementHeapCapacity,
+    /// TODO(#25239): Add documentation.
     ArrangementHeapAllocations,
+    /// TODO(#25239): Add documentation.
     ShutdownDuration,
+    /// TODO(#25239): Add documentation.
     ErrorCount,
 }
 
@@ -309,6 +345,7 @@ impl RustType for ComputeLog {
     }
 }
 
+/// TODO(#25239): Add documentation.
 pub static DEFAULT_LOG_VARIANTS: Lazy<Vec<LogVariant>> = Lazy::new(|| {
     let default_logs = vec![
         LogVariant::Timely(TimelyLog::Operates),
@@ -350,6 +387,7 @@ impl LogVariant {
             .unwrap_or_else(|| (0..arity).collect())
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn desc(&self) -> RelationDesc {
         match self {
             LogVariant::Timely(TimelyLog::Operates) => RelationDesc::empty()
diff --git a/src/compute-client/src/metrics.rs b/src/compute-client/src/metrics.rs
index c20916739db20..907a34f203e59 100644
--- a/src/compute-client/src/metrics.rs
+++ b/src/compute-client/src/metrics.rs
@@ -31,6 +31,7 @@ use crate::protocol::response::{PeekResponse, ProtoComputeResponse};
 
 type IntCounter = DeleteOnDropCounter<'static, AtomicU64, Vec>;
 type Gauge = DeleteOnDropGauge<'static, AtomicF64, Vec>;
+/// TODO(#25239): Add documentation.
 pub type UIntGauge = DeleteOnDropGauge<'static, AtomicU64, Vec>;
 type Histogram = DeleteOnDropHistogram<'static, Vec>;
 
@@ -64,6 +65,7 @@ pub struct ComputeControllerMetrics {
 }
 
 impl ComputeControllerMetrics {
+    /// TODO(#25239): Add documentation.
     pub fn new(metrics_registry: MetricsRegistry) -> Self {
         ComputeControllerMetrics {
             commands_total: metrics_registry.register(metric!(
@@ -145,6 +147,7 @@ impl ComputeControllerMetrics {
         }
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn for_instance(&self, instance_id: ComputeInstanceId) -> InstanceMetrics {
         let labels = vec![instance_id.to_string()];
         let replica_count = self.replica_count.get_delete_on_drop_gauge(labels.clone());
@@ -193,17 +196,26 @@ pub struct InstanceMetrics {
     instance_id: ComputeInstanceId,
     metrics: ComputeControllerMetrics,
 
+    /// TODO(#25239): Add documentation.
     pub replica_count: UIntGauge,
+    /// TODO(#25239): Add documentation.
     pub collection_count: UIntGauge,
+    /// TODO(#25239): Add documentation.
     pub peek_count: UIntGauge,
+    /// TODO(#25239): Add documentation.
     pub subscribe_count: UIntGauge,
+    /// TODO(#25239): Add documentation.
     pub history_command_count: CommandMetrics,
+    /// TODO(#25239): Add documentation.
     pub history_dataflow_count: UIntGauge,
+    /// TODO(#25239): Add documentation.
     pub peeks_total: PeekMetrics,
+    /// TODO(#25239): Add documentation.
     pub peek_duration_seconds: PeekMetrics,
 }
 
 impl InstanceMetrics {
+    /// TODO(#25239): Add documentation.
     pub fn for_replica(&self, replica_id: ReplicaId) -> ReplicaMetrics {
         let labels = vec![self.instance_id.to_string(), replica_id.to_string()];
         let extended_labels = |extra: &str| {
@@ -263,6 +275,7 @@ impl InstanceMetrics {
         }
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn for_history(&self) -> HistoryMetrics {
         let labels = vec![self.instance_id.to_string()];
         let command_counts = CommandMetrics::build(|typ| {
@@ -298,9 +311,11 @@ pub struct ReplicaMetrics {
     replica_id: ReplicaId,
     metrics: ComputeControllerMetrics,
 
+    /// TODO(#25239): Add documentation.
     pub inner: Arc<ReplicaMetricsInner>,
 }
 
+/// TODO(#25239): Add documentation.
 #[derive(Debug)]
 pub struct ReplicaMetricsInner {
     commands_total: CommandMetrics,
@@ -308,7 +323,9 @@ pub struct ReplicaMetricsInner {
     responses_total: ResponseMetrics,
     response_message_bytes_total: ResponseMetrics,
 
+    /// TODO(#25239): Add documentation.
     pub command_queue_size: UIntGauge,
+    /// TODO(#25239): Add documentation.
     pub response_queue_size: UIntGauge,
 }
 
@@ -363,23 +380,33 @@ impl StatsCollector for ReplicaMetric
 /// Per-replica-and-collection metrics.
 #[derive(Debug)]
 pub(crate) struct ReplicaCollectionMetrics {
+    /// TODO(#25239): Add documentation.
     pub initial_output_duration_seconds: Gauge,
 }
 
 /// Metrics keyed by `ComputeCommand` type.
 #[derive(Debug)]
 pub struct CommandMetrics<M> {
+    /// TODO(#25239): Add documentation.
     pub create_timely: M,
+    /// TODO(#25239): Add documentation.
     pub create_instance: M,
+    /// TODO(#25239): Add documentation.
     pub create_dataflow: M,
+    /// TODO(#25239): Add documentation.
     pub allow_compaction: M,
+    /// TODO(#25239): Add documentation.
     pub peek: M,
+    /// TODO(#25239): Add documentation.
     pub cancel_peek: M,
+    /// TODO(#25239): Add documentation.
     pub initialization_complete: M,
+    /// TODO(#25239): Add documentation.
     pub update_configuration: M,
 }
 
 impl<M> CommandMetrics<M> {
+    /// TODO(#25239): Add documentation.
     pub fn build<F>(build_metric: F) -> Self
     where
         F: Fn(&str) -> M,
@@ -410,6 +437,7 @@ impl CommandMetrics {
         f(&self.cancel_peek);
     }
 
+    /// TODO(#25239): Add documentation.
     pub fn for_command(&self, command: &ComputeCommand) -> &M {
         use ComputeCommand::*;
 
diff --git a/src/compute-client/src/protocol/command.rs b/src/compute-client/src/protocol/command.rs
index e247f3029bed0..c624c978cab6c 100644
--- a/src/compute-client/src/protocol/command.rs
+++ b/src/compute-client/src/protocol/command.rs
@@ -59,7 +59,9 @@ pub enum ComputeCommand {
     /// use the `epoch` to ensure that their individual processes agree on which protocol iteration
     /// they are in.
     CreateTimely {
+        /// TODO(#25239): Add documentation.
         config: TimelyConfig,
+        /// TODO(#25239): Add documentation.
         epoch: ClusterStartupEpoch,
     },
 
@@ -173,7 +175,9 @@ pub enum ComputeCommand {
     /// [`FrontierUpper`]: super::response::ComputeResponse::FrontierUpper
     /// [#16275]: https://github.com/MaterializeInc/materialize/issues/16275
     AllowCompaction {
+        /// TODO(#25239): Add documentation.
        id: GlobalId,
+        /// TODO(#25239): Add documentation.
         frontier: Antichain,
     },
 
@@ -328,6 +332,7 @@ impl Arbitrary for ComputeCommand {
 /// for anything in this struct.
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Arbitrary)]
 pub struct InstanceConfig {
+    /// TODO(#25239): Add documentation.
     pub logging: LoggingConfig,
 }
 
@@ -493,6 +498,7 @@ pub enum PeekTarget {
 }
 
 impl PeekTarget {
+    /// TODO(#25239): Add documentation.
     pub fn id(&self) -> GlobalId {
         match self {
             PeekTarget::Index { id, .. } => *id,
diff --git a/src/compute-client/src/protocol/history.rs b/src/compute-client/src/protocol/history.rs
index 6cee95f56e081..57d0c0141f87c 100644
--- a/src/compute-client/src/protocol/history.rs
+++ b/src/compute-client/src/protocol/history.rs
@@ -19,6 +19,7 @@ use timely::progress::Antichain;
 use crate::metrics::HistoryMetrics;
 use crate::protocol::command::{ComputeCommand, ComputeParameters, Peek};
 
+/// TODO(#25239): Add documentation.
 #[derive(Debug)]
 pub struct ComputeCommandHistory {
     /// The number of commands at the last time we compacted the history.
@@ -38,6 +39,7 @@ where
     M: Borrow,
     T: timely::progress::Timestamp,
 {
+    /// TODO(#25239): Add documentation.
     pub fn new(metrics: HistoryMetrics) -> Self {
         metrics.reset();
 
diff --git a/src/compute-client/src/protocol/response.rs b/src/compute-client/src/protocol/response.rs
index 5cb44d8275113..32f1ad5acf7f8 100644
--- a/src/compute-client/src/protocol/response.rs
+++ b/src/compute-client/src/protocol/response.rs
@@ -74,7 +74,12 @@ pub enum ComputeResponse {
     /// [`CreateDataflow` command]: super::command::ComputeCommand::CreateDataflow
     /// [`CreateInstance` command]: super::command::ComputeCommand::CreateInstance
     /// [#16275]: https://github.com/MaterializeInc/materialize/issues/16275
-    FrontierUpper { id: GlobalId, upper: Antichain },
+    FrontierUpper {
+        /// TODO(#25239): Add documentation.
+        id: GlobalId,
+        /// TODO(#25239): Add documentation.
+        upper: Antichain,
+    },
 
     /// `PeekResponse` reports the result of a previous [`Peek` command]. The peek is identified by
     /// a `Uuid` that matches the command's [`Peek::uuid`].
@@ -217,6 +222,7 @@ pub enum PeekResponse {
 }
 
 impl PeekResponse {
+    /// TODO(#25239): Add documentation.
     pub fn unwrap_rows(self) -> Vec<(Row, NonZeroUsize)> {
         match self {
             PeekResponse::Rows(rows) => rows,
diff --git a/src/compute-client/src/service.rs b/src/compute-client/src/service.rs
index 77d9918a8736f..8716512f40410 100644
--- a/src/compute-client/src/service.rs
+++ b/src/compute-client/src/service.rs
@@ -57,6 +57,7 @@ impl GenericClient, ComputeResponse> for Box
+/// TODO(#25239): Add documentation.
 
 #[async_trait]
diff --git a/src/compute-types/build.rs b/src/compute-types/build.rs
index 2001219d8712a..6d39232b56f43 100644
--- a/src/compute-types/build.rs
+++ b/src/compute-types/build.rs
@@ -13,7 +13,9 @@ fn main() {
     env::set_var("PROTOC", protobuf_src::protoc());
 
     let mut config = prost_build::Config::new();
-    config.btree_map(["."]);
+    config
+        .btree_map(["."])
+        .type_attribute(".", "#[allow(missing_docs)]");
 
     tonic_build::configure()
         // Enabling `emit_rerun_if_changed` will rerun the build script when
diff --git a/src/compute-types/src/dataflows.rs b/src/compute-types/src/dataflows.rs
index a553578258307..9e429fe2f85f4 100644
--- a/src/compute-types/src/dataflows.rs
+++ b/src/compute-types/src/dataflows.rs
@@ -692,7 +692,9 @@ pub struct IndexImport {
 /// An association of a global identifier to an expression.
 #[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
 pub struct BuildDesc<P> {
+    /// TODO(#25239): Add documentation.
     pub id: GlobalId,
+    /// TODO(#25239): Add documentation.
     pub plan: P,
 }
 
diff --git a/src/compute-types/src/explain/mod.rs b/src/compute-types/src/explain/mod.rs
index 3c7abb697e8e3..d12593921d346 100644
--- a/src/compute-types/src/explain/mod.rs
+++ b/src/compute-types/src/explain/mod.rs
@@ -151,6 +151,7 @@ impl<'a> DataflowDescription {
     }
 }
 
+/// TODO(#25239): Add documentation.
 pub fn export_ids_for(dd: &DataflowDescription) -> BTreeMap {
     let mut map = BTreeMap::::default();
 
diff --git a/src/compute-types/src/lib.rs b/src/compute-types/src/lib.rs
index 1612c01e6d8ab..c7ac4ca69e5cc 100644
--- a/src/compute-types/src/lib.rs
+++ b/src/compute-types/src/lib.rs
@@ -7,6 +7,8 @@
 // the Business Source License, use of this software will be governed
 // by the Apache License, Version 2.0.
 
+#![warn(missing_docs)]
+
 //! Shared types for the `mz-compute*` crates
 
 use std::time::Duration;
diff --git a/src/compute-types/src/plan/interpret/api.rs b/src/compute-types/src/plan/interpret/api.rs
index b0ec362ba04e8..86426034bb557 100644
--- a/src/compute-types/src/plan/interpret/api.rs
+++ b/src/compute-types/src/plan/interpret/api.rs
@@ -48,14 +48,17 @@ use crate::plan::{AvailableCollections, GetPlan, Plan};
 ///
 /// TODO(#24943): align this with the `Plan` structure
 pub trait Interpreter {
+    /// TODO(#25239): Add documentation.
     type Domain: Debug + Sized;
 
+    /// TODO(#25239): Add documentation.
     fn constant(
         &self,
         ctx: &Context,
         rows: &Result, EvalError>,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn get(
         &self,
         ctx: &Context,
@@ -64,6 +67,7 @@ pub trait Interpreter {
         plan: &GetPlan,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn mfp(
         &self,
         ctx: &Context,
@@ -72,6 +76,7 @@ pub trait Interpreter {
         input_key_val: &Option<(Vec, Option)>,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn flat_map(
         &self,
         ctx: &Context,
@@ -82,6 +87,7 @@ pub trait Interpreter {
         input_key: &Option>,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn join(
         &self,
         ctx: &Context,
@@ -89,6 +95,7 @@ pub trait Interpreter {
         plan: &JoinPlan,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn reduce(
         &self,
         ctx: &Context,
@@ -99,6 +106,7 @@ pub trait Interpreter {
         mfp_after: &MapFilterProject,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn top_k(
         &self,
         ctx: &Context,
@@ -106,8 +114,10 @@ pub trait Interpreter {
         top_k_plan: &TopKPlan,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn negate(&self, ctx: &Context, input: Self::Domain) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn threshold(
         &self,
         ctx: &Context,
@@ -115,6 +125,7 @@ pub trait Interpreter {
         threshold_plan: &ThresholdPlan,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn union(
         &self,
         ctx: &Context,
@@ -122,6 +133,7 @@ pub trait Interpreter {
         consolidate_output: bool,
     ) -> Self::Domain;
 
+    /// TODO(#25239): Add documentation.
     fn arrange_by(
         &self,
         ctx: &Context,
@@ -140,6 +152,8 @@ pub struct InterpreterContext {
     /// Is the context recursive (i.e., is one of our ancestors a `LetRec` binding) or not.
     pub is_rec: bool,
 }
+
+/// TODO(#25239): Add documentation.
 pub type Context = InterpreterContext;
 
 impl Default for InterpreterContext {
@@ -211,6 +225,7 @@ where
     I: Interpreter,
     I::Domain: BoundedLattice + Clone,
 {
+    /// TODO(#25239): Add documentation.
     pub fn new(interpreter: I) -> Self {
         Self {
             interpret: interpreter,
@@ -485,6 +500,7 @@ where
     I::Domain: BoundedLattice + Clone,
     A: FnMut(&mut Plan, &I::Domain, &[I::Domain]),
 {
+    /// TODO(#25239): Add documentation.
     pub fn new(interpreter: I, action: A) -> Self {
         Self {
             interpret: interpreter,
diff --git a/src/compute-types/src/plan/join/mod.rs b/src/compute-types/src/plan/join/mod.rs
index 9ac3f3a7c9a9d..7e65c195b8ee4 100644
--- a/src/compute-types/src/plan/join/mod.rs
+++ b/src/compute-types/src/plan/join/mod.rs
@@ -84,7 +84,9 @@ impl RustType for JoinPlan {
 /// this with a Rust closure (glorious battle was waged, but ultimately lost).
 #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, Ord, PartialOrd)]
 pub struct JoinClosure {
+    /// TODO(#25239): Add documentation.
     pub ready_equivalences: Vec>,
+    /// TODO(#25239): Add documentation.
     pub before: mz_expr::SafeMfpPlan,
 }
 
diff --git a/src/compute-types/src/plan/transform/api.rs b/src/compute-types/src/plan/transform/api.rs
index 8190488eeef9d..be12fc1ad2743 100644
--- a/src/compute-types/src/plan/transform/api.rs
+++ b/src/compute-types/src/plan/transform/api.rs
@@ -21,11 +21,13 @@ use crate::plan::Plan;
 /// as an immutable reference.
 #[derive(Debug)]
 pub struct TransformConfig {
+    /// TODO(#25239): Add documentation.
     pub monotonic_ids: BTreeSet,
 }
 
 /// A transform for [crate::plan::Plan] nodes.
 pub trait Transform {
+    /// TODO(#25239): Add documentation.
     fn name(&self) -> &'static str;
 
     /// Transform a [Plan] using the given [TransformConfig].
@@ -56,6 +58,7 @@ pub trait Transform {
     ) -> Result<(), RecursionLimitError>;
 }
 
+/// TODO(#25239): Add documentation.
 pub trait BottomUpTransform {
     /// A type representing analysis information to be associated with each
     /// sub-term and exposed to the transformation action callback.
diff --git a/src/compute-types/src/plan/transform/relax_must_consolidate.rs b/src/compute-types/src/plan/transform/relax_must_consolidate.rs
index a67f02b0b09f5..746f94539eec2 100644
--- a/src/compute-types/src/plan/transform/relax_must_consolidate.rs
+++ b/src/compute-types/src/plan/transform/relax_must_consolidate.rs
@@ -27,6 +27,7 @@ pub struct RelaxMustConsolidate {
 }
 
 impl RelaxMustConsolidate {
+    /// TODO(#25239): Add documentation.
     pub fn new() -> Self {
         RelaxMustConsolidate {
             _phantom: Default::default(),
diff --git a/src/compute-types/src/sinks.rs b/src/compute-types/src/sinks.rs
index 643375a8f55f1..c68b60c903e6e 100644
--- a/src/compute-types/src/sinks.rs
+++ b/src/compute-types/src/sinks.rs
@@ -23,12 +23,19 @@ include!(concat!(env!("OUT_DIR"), "/mz_compute_types.sinks.rs"));
 /// A sink for updates to a relational collection.
 #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
 pub struct ComputeSinkDesc {
+    /// TODO(#25239): Add documentation.
     pub from: GlobalId,
+    /// TODO(#25239): Add documentation.
     pub from_desc: RelationDesc,
+    /// TODO(#25239): Add documentation.
     pub connection: ComputeSinkConnection,
+    /// TODO(#25239): Add documentation.
     pub with_snapshot: bool,
+    /// TODO(#25239): Add documentation.
     pub up_to: Antichain,
+    /// TODO(#25239): Add documentation.
     pub non_null_assertions: Vec,
+    /// TODO(#25239): Add documentation.
     pub refresh_schedule: Option,
 }
 
@@ -103,10 +110,14 @@ impl RustType for ComputeSinkDesc {
+    /// TODO(#25239): Add documentation.
     Subscribe(SubscribeSinkConnection),
+    /// TODO(#25239): Add documentation.
     Persist(PersistSinkConnection),
+    /// TODO(#25239): Add documentation.
     S3Oneshot(S3OneshotSinkConnection),
 }
 
@@ -155,12 +166,16 @@ impl RustType for ComputeSinkConnection for S3OneshotSinkConnection {
     }
 }
 
+/// TODO(#25239): Add documentation.
 #[derive(Arbitrary, Clone, Debug, Eq, PartialEq, Serialize, Deserialize)]
 pub struct PersistSinkConnection<S> {
+    /// TODO(#25239): Add documentation.
     pub value_desc: RelationDesc,
+    /// TODO(#25239): Add documentation.
     pub storage_metadata: S,
 }
 
diff --git a/src/compute/src/extensions/mod.rs b/src/compute/src/extensions/mod.rs
index aecfbc006b126..89dd28b94bfcb 100644
--- a/src/compute/src/extensions/mod.rs
+++ b/src/compute/src/extensions/mod.rs
@@ -7,8 +7,6 @@
 // the Business Source License, use of this software will be governed
 // by the Apache License, Version 2.0.
 
-#![warn(missing_docs)]
-
 //! Operator extensions to Timely and Differential
 
 pub(crate) mod arrange;
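
A note on the build-script changes in `src/compute-client/build.rs` and `src/compute-types/build.rs`: the crate roots turn on `#![warn(missing_docs)]`, while the build scripts stamp `#[allow(missing_docs)]` onto every prost/tonic-generated type, since generated protobuf code cannot carry hand-written doc comments. The sketch below shows that combination in isolation; it is a minimal, hypothetical `build.rs` that compiles a placeholder `example.proto` directly with prost, whereas the real build scripts above hand the same `Config` to `tonic_build` and point `PROTOC` at `protobuf_src`.

```rust
// Hypothetical minimal build.rs illustrating the lint/codegen pattern above.
// `example.proto` is a placeholder, not a file touched by this diff.
fn main() {
    let mut config = prost_build::Config::new();
    config
        // Generate BTreeMap instead of HashMap for protobuf map fields,
        // matching the `btree_map(["."])` call in the build scripts above.
        .btree_map(["."])
        // Generated types carry no doc comments, so exempt everything under
        // the root path "." from the crate-level `#![warn(missing_docs)]` lint.
        .type_attribute(".", "#[allow(missing_docs)]");
    config
        .compile_protos(&["example.proto"], &["."])
        .expect("failed to compile protobuf definitions");
}
```

With the lint enabled at the crate root, every remaining undocumented public item in hand-written code produces a warning; the `/// TODO(#25239): Add documentation.` comments added throughout this diff are placeholder docs that satisfy the lint until real documentation is written.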