#compdef odbc2parquet

autoload -U is-at-least

_odbc2parquet() {
    # Completion entry point: completes the global flags, the subcommand name
    # and — via the $state machine below — the options of the chosen subcommand.
    typeset -A opt_args
    typeset -a _arguments_options
    local ret=1

    # zsh >= 5.2 understands -S (stop completing options after --) in _arguments.
    if is-at-least 5.2; then
        _arguments_options=(-s -S -C)
    else
        _arguments_options=(-s -C)
    fi

    local context curcontext="$curcontext" state line
    _arguments "${_arguments_options[@]}" : \
'-q[Only print errors to standard error stream. Suppresses warnings and all other log levels independent of the verbose mode]' \
'--quiet[Only print errors to standard error stream. Suppresses warnings and all other log levels independent of the verbose mode]' \
'*-v[Verbose mode (-v, -vv, -vvv, etc)]' \
'*--verbose[Verbose mode (-v, -vv, -vvv, etc)]' \
'--no-color[Never emit colors]' \
'-h[Print help (see more with '\''--help'\'')]' \
'--help[Print help (see more with '\''--help'\'')]' \
'-V[Print version]' \
'--version[Print version]' \
":: :_odbc2parquet_commands" \
"*::: :->odbc2parquet" \
&& ret=0
    # Dispatch into the option spec of the subcommand that was completed above.
    case $state in
    (odbc2parquet)
        words=($line[1] "${words[@]}")
        (( CURRENT += 1 ))
        curcontext="${curcontext%:*:*}:odbc2parquet-command-$line[1]:"
        case $line[1] in
            (query)
_arguments "${_arguments_options[@]}" : \
'-c+[The connection string used to connect to the ODBC data source. Alternatively you may specify the ODBC dsn]:CONNECTION_STRING:_default' \
'--connection-string=[The connection string used to connect to the ODBC data source. Alternatively you may specify the ODBC dsn]:CONNECTION_STRING:_default' \
'(-c --connection-string)--dsn=[ODBC Data Source Name. Either this or the connection string must be specified to identify the datasource. Data source name (dsn) and connection string, may not be specified both]:DSN:_default' \
'-u+[User used to access the datasource specified in dsn. Should you specify a connection string instead of a Data Source Name the user name is going to be appended at the end of it as the \`UID\` attribute]:USER:_default' \
'--user=[User used to access the datasource specified in dsn. Should you specify a connection string instead of a Data Source Name the user name is going to be appended at the end of it as the \`UID\` attribute]:USER:_default' \
'-p+[Password used to log into the datasource. Only used if dsn is specified, instead of a connection string. Should you specify a Connection string instead of a Data Source Name the password is going to be appended at the end of it as the \`PWD\` attribute]:PASSWORD:_default' \
'--password=[Password used to log into the datasource. Only used if dsn is specified, instead of a connection string. Should you specify a Connection string instead of a Data Source Name the password is going to be appended at the end of it as the \`PWD\` attribute]:PASSWORD:_default' \
'--batch-size-row=[Size of a single batch in rows. The content of the data source is written into the output parquet files in batches. This way the content does never need to be materialized completely in memory at once. If \`--batch-size-memory\` is not specified this value defaults to 65535. This avoids issues with some ODBC drivers using 16Bit integers to represent batch sizes. If \`--batch-size-memory\` is specified no other limit is applied by default. If both options are specified the batch size is the largest possible which satisfies both constraints]:BATCH_SIZE_ROW:_default' \
'--batch-size-memory=[Limits the size of a single batch. It does so by calculating the amount of memory each row requires in the allocated buffers and then limits the maximum number of rows so that the maximum buffer size comes as close as possible, but does not exceed the specified amount. Default is 2GiB on 64 Bit platforms and 1GiB on 32 Bit Platforms if \`--batch-size-row\` is not specified. If \`--batch-size-row\` is not specified no memory limit is applied by default. If both options are specified the batch size is the largest possible which satisfies both constraints. This option controls the size of the buffers of data in transit, and therefore the memory usage of this tool. It indirectly controls the size of the row groups written to parquet (since each batch is written as one row group). It is hard to make a generic statement about how much smaller the average row group will be. This option allows you to specify the memory usage using SI units. So you can pass \`2Gib\`, \`600Mb\` and so on]:BATCH_SIZE_MEMORY:_default' \
'--row-groups-per-file=[Maximum number of batches in a single output parquet file. If this option is omitted or 0 a single output file is produced. Otherwise each output file is closed after the maximum number of batches have been written and a new one with the suffix \`_n\` is started. Here n is the number of the produced output file starting at one for the first one. E.g. \`out_01.par\`, \`out_2.par\`, ...]:ROW_GROUPS_PER_FILE:_default' \
'--file-size-threshold=[When the size of the currently written parquet files goes beyond this threshold the current row group will be finished and then the file will be closed. So the file will be somewhat larger than the threshold. All further row groups will be written into new files to which the threshold size limit is applied as well. If this option is not set, no size threshold is applied. If the threshold is applied the first file name will have the suffix \`_01\`, the second the suffix \`_2\` and so on. Therefore, the first resulting file will be called e.g. \`out_1.par\`, if \`out.par\` has been specified as the output argument. Also note that this option will not act as an upper bound. It will act as a lower bound for all but the last file, all others however will not be larger than this threshold by more than the size of one row group. You can use the \`batch_size_row\` and \`batch_size_memory\` options to control the size of the row groups. Do not expect the \`batch_size_memory\` however to be equal to the row group size. The row group size depends on the actual data in the database, and is due to compression likely much smaller. Values of this option can be specified in SI units. E.g. \`--file-size-threshold 1GiB\`]:FILE_SIZE_THRESHOLD:_default' \
'--column-length-limit=[You can use this to limit the transfer buffer size which is used for an individual variadic sized column]:COLUMN_LENGTH_LIMIT:_default' \
'--column-compression-default=[Default compression used by the parquet file writer]:COLUMN_COMPRESSION_DEFAULT:(uncompressed gzip lz4 lz0 zstd snappy brotli)' \
'--column-compression-level-default=[The \`gzip\`, \`zstd\` and \`brotli\` compression variants allow for specifying an explicit compression level. If the selected compression variant does not support an explicit compression level this option is ignored]:COLUMN_COMPRESSION_LEVEL_DEFAULT:_default' \
'--encoding=[Encoding used for character data requested from the data source]:ENCODING:(system utf16 auto)' \
'*--parquet-column-encoding=[Specify the fallback encoding of the parquet output column. You can parse multiple values in format \`COLUMN\:ENCODING\`. \`ENCODING\` must be one of\: \`plain\`, \`delta-binary-packed\`, \`delta-byte-array\`, \`delta-length-byte-array\` or \`rle\`]:PARQUET_COLUMN_ENCODING:_default' \
'--suffix-length=[In case fetch results get split into multiple files a suffix with a number will be appended to each file name. Default suffix length is 2 leading to suffixes like e.g. \`_03\`. In case you would expect thousands of files in your output you may want to set this to say \`4\` so the zeros pad this to a 4 digit number in order to make the filenames more friendly for lexical sorting]:SUFFIX_LENGTH:_default' \
'(--dsn)--prompt[Prompts the user for missing information from the connection string. Only supported on windows platform]' \
'--sequential-fetching[Trade speed for memory. If \`true\`, only one fetch buffer is allocated. It usually takes way more memory than the buffers required to write into parquet, since it contains the data uncompressed and must be able to hold the largest possible value of fields, even if the actual data is small. So only using one instead of two usually halves the required memory, yet it blocks fetching the next batch from the database, until the contents of the current one have been written. This can slow down the creation of parquet up to a factor of two in case writing to parquet takes just as much time as fetching from the database. Usually io to the database is the bottleneck so the actual slow down is likely lower, but often still significant]' \
'--prefer-varbinary[Map \`BINARY\` SQL columns to \`BYTE_ARRAY\` instead of \`FIXED_LEN_BYTE_ARRAY\`. This flag has been introduced in an effort to increase the compatibility of the output with Apache Spark]' \
'--driver-does-not-support-64bit-integers[Tells the odbc2parquet, that the ODBC driver does not support binding 64-Bit integers (aka S_C_BIGINT in ODBC speak). This will cause the odbc2parquet to query large integers as text instead and convert them to 64-Bit integers itself. Setting this flag will not affect the output, but may incur a performance penalty. In case you are using an Oracle Database it can make queries work which did not before, because Oracle does not support 64-Bit integers]' \
'--avoid-decimal[Use this flag if you want to avoid the logical type DECIMAL in the produced output. E.g. because you want to process it with polars which does not support DECIMAL. In case the scale of the relational Decimal type is 0, the output will be mapped to either 32Bit or 64Bit Integers with logical type none. If the scale is not 0 the Decimal column will be fetched as text]' \
'--no-empty-file[In case the query comes back with a result set, but no rows, by default a file with only schema information is still created. If you do not want to create any file in case the result set is empty you can set this flag]' \
'-h[Print help (see more with '\''--help'\'')]' \
'--help[Print help (see more with '\''--help'\'')]' \
':output -- Name of the output parquet file. Use `-` to indicate that the output should be written to standard out instead. This option does nothing if the output is written to standard out:_default' \
':query -- Query executed against the ODBC data source. Question marks (`?`) can be used as placeholders for positional parameters. E.g. "SELECT Name FROM Employees WHERE salary > ?;". Instead of passing a query verbatim, you may pass a plain dash (`-`), to indicate that the query should be read from standard input. In this case the entire input until EOF will be considered the query:_default' \
'*::parameters -- For each placeholder question mark (`?`) in the query text one parameter must be passed at the end of the command line:_default' \
&& ret=0
;;
(list-drivers)
_arguments "${_arguments_options[@]}" : \
'-h[Print help]' \
'--help[Print help]' \
&& ret=0
;;
(list-data-sources)
_arguments "${_arguments_options[@]}" : \
'-h[Print help]' \
'--help[Print help]' \
&& ret=0
;;
(insert)
_arguments "${_arguments_options[@]}" : \
'-c+[The connection string used to connect to the ODBC data source. Alternatively you may specify the ODBC dsn]:CONNECTION_STRING:_default' \
'--connection-string=[The connection string used to connect to the ODBC data source. Alternatively you may specify the ODBC dsn]:CONNECTION_STRING:_default' \
'(-c --connection-string)--dsn=[ODBC Data Source Name. Either this or the connection string must be specified to identify the datasource. Data source name (dsn) and connection string, may not be specified both]:DSN:_default' \
'-u+[User used to access the datasource specified in dsn. Should you specify a connection string instead of a Data Source Name the user name is going to be appended at the end of it as the \`UID\` attribute]:USER:_default' \
'--user=[User used to access the datasource specified in dsn. Should you specify a connection string instead of a Data Source Name the user name is going to be appended at the end of it as the \`UID\` attribute]:USER:_default' \
'-p+[Password used to log into the datasource. Only used if dsn is specified, instead of a connection string. Should you specify a Connection string instead of a Data Source Name the password is going to be appended at the end of it as the \`PWD\` attribute]:PASSWORD:_default' \
'--password=[Password used to log into the datasource. Only used if dsn is specified, instead of a connection string. Should you specify a Connection string instead of a Data Source Name the password is going to be appended at the end of it as the \`PWD\` attribute]:PASSWORD:_default' \
'--encoding=[Encoding used for transferring character data to the database]:ENCODING:(system utf16 auto)' \
'(--dsn)--prompt[Prompts the user for missing information from the connection string. Only supported on windows platform]' \
'-h[Print help (see more with '\''--help'\'')]' \
'--help[Print help (see more with '\''--help'\'')]' \
':input -- Path to the input parquet file which is used to fill the database table with values:_files' \
':table -- Name of the table to insert the values into. No precautions against SQL injection are taken. The insert statement is created by the tool. It will only work if the column names are the same in the parquet file and the database:_default' \
&& ret=0
;;
(completions)
_arguments "${_arguments_options[@]}" : \
'-o+[Output file. Defaults to \`-\` which means standard output]:OUTPUT:_default' \
'--output=[Output file. Defaults to \`-\` which means standard output]:OUTPUT:_default' \
'-h[Print help]' \
'--help[Print help]' \
':shell -- Name of the shell to generate completions for:(bash elvish fish powershell zsh)' \
&& ret=0
;;
(exec)
_arguments "${_arguments_options[@]}" : \
'-c+[The connection string used to connect to the ODBC data source. Alternatively you may specify the ODBC dsn]:CONNECTION_STRING:_default' \
'--connection-string=[The connection string used to connect to the ODBC data source. Alternatively you may specify the ODBC dsn]:CONNECTION_STRING:_default' \
'(-c --connection-string)--dsn=[ODBC Data Source Name. Either this or the connection string must be specified to identify the datasource. Data source name (dsn) and connection string, may not be specified both]:DSN:_default' \
'-u+[User used to access the datasource specified in dsn. Should you specify a connection string instead of a Data Source Name the user name is going to be appended at the end of it as the \`UID\` attribute]:USER:_default' \
'--user=[User used to access the datasource specified in dsn. Should you specify a connection string instead of a Data Source Name the user name is going to be appended at the end of it as the \`UID\` attribute]:USER:_default' \
'-p+[Password used to log into the datasource. Only used if dsn is specified, instead of a connection string. Should you specify a Connection string instead of a Data Source Name the password is going to be appended at the end of it as the \`PWD\` attribute]:PASSWORD:_default' \
'--password=[Password used to log into the datasource. Only used if dsn is specified, instead of a connection string. Should you specify a Connection string instead of a Data Source Name the password is going to be appended at the end of it as the \`PWD\` attribute]:PASSWORD:_default' \
'--encoding=[Encoding used for transferring character data to the database]:ENCODING:(system utf16 auto)' \
'(--dsn)--prompt[Prompts the user for missing information from the connection string. Only supported on windows platform]' \
'-h[Print help (see more with '\''--help'\'')]' \
'--help[Print help (see more with '\''--help'\'')]' \
':input -- Path to the input parquet file which is used to fill the database table with values:_files' \
':statement -- SQL statement to execute. You can bind the columns of the parquet file to input parameters of the statement. You can do this by using the column name of the parquet file surrounded by question marks (`?`). E.g. `INSERT INTO table (col1, col2) VALUES (?col1?, ?col2?)`. In case you want to use the `?` in a capacity different from a placeholder it must be escaped with a backslash (`\?`). Backslashes must also be escaped with another backslash. Keep in mind that your shell may also need escaping for backslashes. You may need four backslashes in total to write a single backslash in e.g. a string literal (`\\\\`):_default' \
&& ret=0
;;
(help)
_arguments "${_arguments_options[@]}" : \
":: :_odbc2parquet__help_commands" \
"*::: :->help" \
&& ret=0

    # Second-level state machine: `help <subcommand>` takes no further options.
    case $state in
    (help)
        words=($line[1] "${words[@]}")
        (( CURRENT += 1 ))
        curcontext="${curcontext%:*:*}:odbc2parquet-help-command-$line[1]:"
        case $line[1] in
            (query)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
(list-drivers)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
(list-data-sources)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
(insert)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
(completions)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
(exec)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
(help)
_arguments "${_arguments_options[@]}" : \
&& ret=0
;;
        esac
    ;;
esac
;;
        esac
    ;;
esac
}

(( $+functions[_odbc2parquet_commands] )) ||
_odbc2parquet_commands() {
    # Top-level subcommand names paired with their one-line descriptions.
    local -a commands=(
        'query:Query a data source and write the result as parquet'
        'list-drivers:List available drivers and their attributes'
        'list-data-sources:List preconfigured data sources. Useful to find data source name to connect to database'
        'insert:Read the content of a parquet and insert it into a table'
        'completions:Generate shell completions'
        'exec:Executes an arbitrary SQL statement using the contents of an parquet file as input arrays'
        'help:Print this message or the help of the given subcommand(s)'
    )
    _describe -t commands 'odbc2parquet commands' commands "$@"
}
(( $+functions[_odbc2parquet__completions_commands] )) ||
_odbc2parquet__completions_commands() {
    # 'completions' has no subcommands of its own; offer an empty list.
    local -a commands=()
    _describe -t commands 'odbc2parquet completions commands' commands "$@"
}
(( $+functions[_odbc2parquet__exec_commands] )) ||
_odbc2parquet__exec_commands() {
    # 'exec' has no subcommands of its own; offer an empty list.
    local -a commands=()
    _describe -t commands 'odbc2parquet exec commands' commands "$@"
}
(( $+functions[_odbc2parquet__help_commands] )) ||
_odbc2parquet__help_commands() {
    # Subcommands that `odbc2parquet help <cmd>` accepts, with descriptions.
    local -a commands=(
        'query:Query a data source and write the result as parquet'
        'list-drivers:List available drivers and their attributes'
        'list-data-sources:List preconfigured data sources. Useful to find data source name to connect to database'
        'insert:Read the content of a parquet and insert it into a table'
        'completions:Generate shell completions'
        'exec:Executes an arbitrary SQL statement using the contents of an parquet file as input arrays'
        'help:Print this message or the help of the given subcommand(s)'
    )
    _describe -t commands 'odbc2parquet help commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__completions_commands] )) ||
_odbc2parquet__help__completions_commands() {
    # Leaf of the help tree: nothing to complete after `help completions`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help completions commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__exec_commands] )) ||
_odbc2parquet__help__exec_commands() {
    # Leaf of the help tree: nothing to complete after `help exec`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help exec commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__help_commands] )) ||
_odbc2parquet__help__help_commands() {
    # Leaf of the help tree: nothing to complete after `help help`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help help commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__insert_commands] )) ||
_odbc2parquet__help__insert_commands() {
    # Leaf of the help tree: nothing to complete after `help insert`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help insert commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__list-data-sources_commands] )) ||
_odbc2parquet__help__list-data-sources_commands() {
    # Leaf of the help tree: nothing to complete after `help list-data-sources`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help list-data-sources commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__list-drivers_commands] )) ||
_odbc2parquet__help__list-drivers_commands() {
    # Leaf of the help tree: nothing to complete after `help list-drivers`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help list-drivers commands' commands "$@"
}
(( $+functions[_odbc2parquet__help__query_commands] )) ||
_odbc2parquet__help__query_commands() {
    # Leaf of the help tree: nothing to complete after `help query`.
    local -a commands=()
    _describe -t commands 'odbc2parquet help query commands' commands "$@"
}
(( $+functions[_odbc2parquet__insert_commands] )) ||
_odbc2parquet__insert_commands() {
    # 'insert' has no subcommands of its own; offer an empty list.
    local -a commands=()
    _describe -t commands 'odbc2parquet insert commands' commands "$@"
}
(( $+functions[_odbc2parquet__list-data-sources_commands] )) ||
_odbc2parquet__list-data-sources_commands() {
    # 'list-data-sources' has no subcommands of its own; offer an empty list.
    local -a commands=()
    _describe -t commands 'odbc2parquet list-data-sources commands' commands "$@"
}
(( $+functions[_odbc2parquet__list-drivers_commands] )) ||
_odbc2parquet__list-drivers_commands() {
    # 'list-drivers' has no subcommands of its own; offer an empty list.
    local -a commands=()
    _describe -t commands 'odbc2parquet list-drivers commands' commands "$@"
}
(( $+functions[_odbc2parquet__query_commands] )) ||
_odbc2parquet__query_commands() {
    # 'query' has no subcommands of its own; offer an empty list.
    local -a commands=()
    _describe -t commands 'odbc2parquet query commands' commands "$@"
}

# When this file is autoloaded as the completer itself, run it immediately;
# otherwise (e.g. when sourced) register it for the odbc2parquet command.
if [[ "$funcstack[1]" == "_odbc2parquet" ]]; then
    _odbc2parquet "$@"
else
    compdef _odbc2parquet odbc2parquet
fi
