diff --git a/docs/source/conf.py b/docs/source/conf.py
index f64c71f399fd..51408f4fa76f 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -119,3 +119,9 @@
 
 # enable nice rendering of checkboxes for the task lists
 myst_enable_extensions = ["colon_fence", "deflist", "tasklist"]
+
+# Some code blocks (sql) are not highlighted correctly due to the presence
+# of special characters such as 🚀, å, {, ... This isn't a major issue for
+# our documentation, so suppress these warnings to keep our build
+# log cleaner.
+suppress_warnings = ['misc.highlighting_failure']
diff --git a/docs/source/contributor-guide/howtos.md b/docs/source/contributor-guide/howtos.md
index f105ab2c42db..e4515cee4218 100644
--- a/docs/source/contributor-guide/howtos.md
+++ b/docs/source/contributor-guide/howtos.md
@@ -49,6 +49,7 @@ Below is a checklist of what you need to do to add a new scalar function to Data
 - Run `./dev/update_function_docs.sh` to update docs
 
 [advanced_udf.rs]: https://github.com/apache/datafusion/blob/main/datafusion-examples/examples/advanced_udaf.rs
+[datafusion/expr/src]: https://github.com/apache/datafusion/tree/main/datafusion/expr/src
 [sqllogictest/test_files]: https://github.com/apache/datafusion/tree/main/datafusion/sqllogictest/test_files
 
 ## How to add a new aggregate function
@@ -56,7 +57,7 @@ Below is a checklist of what you need to do to add a new scalar function to Data
 Below is a checklist of what you need to do to add a new aggregate function to DataFusion:
 
 - Add the actual implementation of an `Accumulator` and `AggregateExpr`:
-- In [datafusion/expr/src](../../../datafusion/expr/src/aggregate_function.rs), add:
+- In [datafusion/expr/src], add:
   - a new variant to `AggregateFunction`
   - a new entry to `FromStr` with the name of the function as called by SQL
   - a new line in `return_type` with the expected return type of the function, given an incoming type
diff --git a/docs/source/library-user-guide/profiling.md b/docs/source/library-user-guide/profiling.md
index 02f6958d1728..40fae6f44705 100644
--- a/docs/source/library-user-guide/profiling.md
+++ b/docs/source/library-user-guide/profiling.md
@@ -21,7 +21,7 @@
 
 The section contains examples how to perform CPU profiling for Apache DataFusion on different operating systems.
 
-### Building a flamegraph
+## Building a flamegraph
 
 [Video: how to CPU profile DataFusion with a Flamegraph](https://youtu.be/2z11xtYw_xs)
 
@@ -34,7 +34,7 @@ in images such as this:
 
 ## MacOS
 
-#### Step 1: Install the flamegraph Tool
+### Step 1: Install the flamegraph Tool
 
 To install flamegraph, run:
 
@@ -42,11 +42,11 @@ To install flamegraph, run:
 ```shell
 cargo install flamegraph
 ```
 
-#### Step 2: Prepare Your Environment
+### Step 2: Prepare Your Environment
 
 Ensure that you're in the directory containing the necessary data files for your DataFusion query. The flamegraph tool will profile the execution of your query against this data.
 
-#### Step 3: Running the Flamegraph Tool
+### Step 3: Running the Flamegraph Tool
 
 To generate a flamegraph, you'll need to use the -- separator to pass arguments to the binary you're profiling. For datafusion-cli, you need to make sure to run the command with sudo permissions (especially on macOS, where DTrace requires elevated privileges).
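A minimal sketch of the Step 3 invocation described above, assuming a query file named `q1.sql` in the current directory (the file name is a placeholder), looks roughly like this:

```shell
# Profile datafusion-cli while it runs q1.sql; everything after `--` is passed
# to the binary being profiled. sudo is needed on macOS because DTrace
# requires elevated privileges.
sudo flamegraph -- datafusion-cli -f q1.sql

# By default the flamegraph tool writes an interactive flamegraph.svg in the
# current directory, which can be opened in a browser.
```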
diff --git a/docs/source/library-user-guide/query-optimizer.md b/docs/source/library-user-guide/query-optimizer.md
index 5aacfaf59cb1..c2c60af85f4c 100644
--- a/docs/source/library-user-guide/query-optimizer.md
+++ b/docs/source/library-user-guide/query-optimizer.md
@@ -68,7 +68,7 @@ let optimizer = Optimizer::with_rules(vec![
 ## Writing Optimization Rules
 
 Please refer to the
-[optimizer_rule.rs](../../datafusion-examples/examples/optimizer_rule.rs)
+[optimizer_rule.rs](../../../datafusion-examples/examples/optimizer_rule.rs)
 example to learn more about the general approach to writing optimizer rules and
 then move onto studying the existing rules.
diff --git a/docs/source/user-guide/sql/scalar_functions.md b/docs/source/user-guide/sql/scalar_functions.md
index b92b815d7c95..232efb02d423 100644
--- a/docs/source/user-guide/sql/scalar_functions.md
+++ b/docs/source/user-guide/sql/scalar_functions.md
@@ -3983,44 +3983,42 @@ make_map(['key1', 'key2'], ['value1', 'value2'])
 
 #### Example
 
-````sql
-  -- Using map function
-  SELECT MAP('type', 'test');
-  ----
-  {type: test}
-
-  SELECT MAP(['POST', 'HEAD', 'PATCH'], [41, 33, null]);
-  ----
-  {POST: 41, HEAD: 33, PATCH: }
+```sql
+-- Using map function
+SELECT MAP('type', 'test');
+----
+{type: test}
 
-  SELECT MAP([[1,2], [3,4]], ['a', 'b']);
-  ----
-  {[1, 2]: a, [3, 4]: b}
+SELECT MAP(['POST', 'HEAD', 'PATCH'], [41, 33, null]);
+----
+{POST: 41, HEAD: 33, PATCH: }
 
-  SELECT MAP { 'a': 1, 'b': 2 };
-  ----
-  {a: 1, b: 2}
+SELECT MAP([[1,2], [3,4]], ['a', 'b']);
+----
+{[1, 2]: a, [3, 4]: b}
 
-  -- Using make_map function
-  SELECT MAKE_MAP(['POST', 'HEAD'], [41, 33]);
-  ----
-  {POST: 41, HEAD: 33}
+SELECT MAP { 'a': 1, 'b': 2 };
+----
+{a: 1, b: 2}
 
-  SELECT MAKE_MAP(['key1', 'key2'], ['value1', null]);
-  ----
-  {key1: value1, key2: }
-  ```
+-- Using make_map function
+SELECT MAKE_MAP(['POST', 'HEAD'], [41, 33]);
+----
+{POST: 41, HEAD: 33}
 
+SELECT MAKE_MAP(['key1', 'key2'], ['value1', null]);
+----
+{key1: value1, key2: }
+```
 
 ### `map_extract`
 
 Returns a list containing the value for the given key or an empty list if the key is not present in the map.
 
-````
-
+```
 map_extract(map, key)
+```
 
-````
 #### Arguments
 
 - **map**: Map expression. Can be a constant, column, or function, and any combination of map operators.
@@ -4040,7 +4038,7 @@ SELECT map_extract(MAP {1: 'one', 2: 'two'}, 2);
 SELECT map_extract(MAP {'x': 10, 'y': NULL, 'z': 30}, 'y');
 ----
 []
-````
+```
 
 #### Aliases
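Following the same `MAP` literal syntax used above, the two outcomes described for `map_extract` (a one-element list when the key exists, an empty list when it does not) would look roughly like this; the key names are illustrative:

```sql
-- Key present: the value is returned wrapped in a single-element list
SELECT map_extract(MAP {'a': 1, 'b': 2}, 'a');
----
[1]

-- Key absent: an empty list is returned
SELECT map_extract(MAP {'a': 1, 'b': 2}, 'z');
----
[]
```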