Edit on GitHub

sqlglot.dialects.bigquery

   1from __future__ import annotations
   2
   3import logging
   4import re
   5import typing as t
   6
   7from sqlglot import exp, generator, parser, tokens, transforms
   8from sqlglot._typing import E
   9from sqlglot.dialects.dialect import (
  10    Dialect,
  11    NormalizationStrategy,
  12    annotate_with_type_lambda,
  13    arg_max_or_min_no_count,
  14    binary_from_function,
  15    date_add_interval_sql,
  16    datestrtodate_sql,
  17    build_formatted_time,
  18    filter_array_using_unnest,
  19    if_sql,
  20    inline_array_unless_query,
  21    max_or_greatest,
  22    min_or_least,
  23    no_ilike_sql,
  24    build_date_delta_with_interval,
  25    regexp_replace_sql,
  26    rename_func,
  27    sha256_sql,
  28    timestrtotime_sql,
  29    ts_or_ds_add_cast,
  30    unit_to_var,
  31    strposition_sql,
  32    groupconcat_sql,
  33    space_sql,
  34)
  35from sqlglot.helper import seq_get, split_num_words
  36from sqlglot.tokens import TokenType
  37from sqlglot.generator import unsupported_args
  38
  39if t.TYPE_CHECKING:
  40    from sqlglot._typing import Lit
  41
  42    from sqlglot.optimizer.annotate_types import TypeAnnotator
  43
logger = logging.getLogger("sqlglot")


# Union of the JSON-extraction expression types that share the BigQuery-specific
# generation logic in _json_extract_sql.
JSON_EXTRACT_TYPE = t.Union[exp.JSONExtract, exp.JSONExtractScalar, exp.JSONExtractArray]

# BigQuery JSON functions whose JSONPath keys are escaped with double quotes
# rather than bracket notation (toggled in _json_extract_sql).
DQUOTES_ESCAPING_JSON_FUNCTIONS = ("JSON_QUERY", "JSON_VALUE", "JSON_QUERY_ARRAY")
  50
  51
  52def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
  53    if not expression.find_ancestor(exp.From, exp.Join):
  54        return self.values_sql(expression)
  55
  56    structs = []
  57    alias = expression.args.get("alias")
  58    for tup in expression.find_all(exp.Tuple):
  59        field_aliases = (
  60            alias.columns
  61            if alias and alias.columns
  62            else (f"_c{i}" for i in range(len(tup.expressions)))
  63        )
  64        expressions = [
  65            exp.PropertyEQ(this=exp.to_identifier(name), expression=fld)
  66            for name, fld in zip(field_aliases, tup.expressions)
  67        ]
  68        structs.append(exp.Struct(expressions=expressions))
  69
  70    # Due to `UNNEST_COLUMN_ONLY`, it is expected that the table alias be contained in the columns expression
  71    alias_name_only = exp.TableAlias(columns=[alias.this]) if alias else None
  72    return self.unnest_sql(
  73        exp.Unnest(expressions=[exp.array(*structs, copy=False)], alias=alias_name_only)
  74    )
  75
  76
  77def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
  78    this = expression.this
  79    if isinstance(this, exp.Schema):
  80        this = f"{self.sql(this, 'this')} <{self.expressions(this)}>"
  81    else:
  82        this = self.sql(this)
  83    return f"RETURNS {this}"
  84
  85
  86def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
  87    returns = expression.find(exp.ReturnsProperty)
  88    if expression.kind == "FUNCTION" and returns and returns.args.get("is_table"):
  89        expression.set("kind", "TABLE FUNCTION")
  90
  91        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
  92            expression.set("expression", expression.expression.this)
  93
  94    return self.create_sql(expression)
  95
  96
  97# https://issuetracker.google.com/issues/162294746
  98# workaround for bigquery bug when grouping by an expression and then ordering
  99# WITH x AS (SELECT 1 y)
 100# SELECT y + 1 z
 101# FROM x
 102# GROUP BY x + 1
 103# ORDER by z
 104def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
 105    if isinstance(expression, exp.Select):
 106        group = expression.args.get("group")
 107        order = expression.args.get("order")
 108
 109        if group and order:
 110            aliases = {
 111                select.this: select.args["alias"]
 112                for select in expression.selects
 113                if isinstance(select, exp.Alias)
 114            }
 115
 116            for grouped in group.expressions:
 117                if grouped.is_int:
 118                    continue
 119                alias = aliases.get(grouped)
 120                if alias:
 121                    grouped.replace(exp.column(alias))
 122
 123    return expression
 124
 125
 126def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
 127    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
 128    if isinstance(expression, exp.CTE) and expression.alias_column_names:
 129        cte_query = expression.this
 130
 131        if cte_query.is_star:
 132            logger.warning(
 133                "Can't push down CTE column names for star queries. Run the query through"
 134                " the optimizer or use 'qualify' to expand the star projections first."
 135            )
 136            return expression
 137
 138        column_names = expression.alias_column_names
 139        expression.args["alias"].set("columns", None)
 140
 141        for name, select in zip(column_names, cte_query.selects):
 142            to_replace = select
 143
 144            if isinstance(select, exp.Alias):
 145                select = select.this
 146
 147            # Inner aliases are shadowed by the CTE column names
 148            to_replace.replace(exp.alias_(select, name))
 149
 150    return expression
 151
 152
 153def _build_parse_timestamp(args: t.List) -> exp.StrToTime:
 154    this = build_formatted_time(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
 155    this.set("zone", seq_get(args, 2))
 156    return this
 157
 158
 159def _build_timestamp(args: t.List) -> exp.Timestamp:
 160    timestamp = exp.Timestamp.from_arg_list(args)
 161    timestamp.set("with_tz", True)
 162    return timestamp
 163
 164
 165def _build_date(args: t.List) -> exp.Date | exp.DateFromParts:
 166    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
 167    return expr_type.from_arg_list(args)
 168
 169
 170def _build_to_hex(args: t.List) -> exp.Hex | exp.MD5:
 171    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
 172    arg = seq_get(args, 0)
 173    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.LowerHex(this=arg)
 174
 175
 176def _array_contains_sql(self: BigQuery.Generator, expression: exp.ArrayContains) -> str:
 177    return self.sql(
 178        exp.Exists(
 179            this=exp.select("1")
 180            .from_(exp.Unnest(expressions=[expression.left]).as_("_unnest", table=["_col"]))
 181            .where(exp.column("_col").eq(expression.right))
 182        )
 183    )
 184
 185
 186def _ts_or_ds_add_sql(self: BigQuery.Generator, expression: exp.TsOrDsAdd) -> str:
 187    return date_add_interval_sql("DATE", "ADD")(self, ts_or_ds_add_cast(expression))
 188
 189
 190def _ts_or_ds_diff_sql(self: BigQuery.Generator, expression: exp.TsOrDsDiff) -> str:
 191    expression.this.replace(exp.cast(expression.this, exp.DataType.Type.TIMESTAMP))
 192    expression.expression.replace(exp.cast(expression.expression, exp.DataType.Type.TIMESTAMP))
 193    unit = unit_to_var(expression)
 194    return self.func("DATE_DIFF", expression.this, expression.expression, unit)
 195
 196
 197def _unix_to_time_sql(self: BigQuery.Generator, expression: exp.UnixToTime) -> str:
 198    scale = expression.args.get("scale")
 199    timestamp = expression.this
 200
 201    if scale in (None, exp.UnixToTime.SECONDS):
 202        return self.func("TIMESTAMP_SECONDS", timestamp)
 203    if scale == exp.UnixToTime.MILLIS:
 204        return self.func("TIMESTAMP_MILLIS", timestamp)
 205    if scale == exp.UnixToTime.MICROS:
 206        return self.func("TIMESTAMP_MICROS", timestamp)
 207
 208    unix_seconds = exp.cast(
 209        exp.Div(this=timestamp, expression=exp.func("POW", 10, scale)), exp.DataType.Type.BIGINT
 210    )
 211    return self.func("TIMESTAMP_SECONDS", unix_seconds)
 212
 213
 214def _build_time(args: t.List) -> exp.Func:
 215    if len(args) == 1:
 216        return exp.TsOrDsToTime(this=args[0])
 217    if len(args) == 2:
 218        return exp.Time.from_arg_list(args)
 219    return exp.TimeFromParts.from_arg_list(args)
 220
 221
 222def _build_datetime(args: t.List) -> exp.Func:
 223    if len(args) == 1:
 224        return exp.TsOrDsToDatetime.from_arg_list(args)
 225    if len(args) == 2:
 226        return exp.Datetime.from_arg_list(args)
 227    return exp.TimestampFromParts.from_arg_list(args)
 228
 229
 230def _build_regexp_extract(
 231    expr_type: t.Type[E], default_group: t.Optional[exp.Expression] = None
 232) -> t.Callable[[t.List], E]:
 233    def _builder(args: t.List) -> E:
 234        try:
 235            group = re.compile(args[1].name).groups == 1
 236        except re.error:
 237            group = False
 238
 239        # Default group is used for the transpilation of REGEXP_EXTRACT_ALL
 240        return expr_type(
 241            this=seq_get(args, 0),
 242            expression=seq_get(args, 1),
 243            position=seq_get(args, 2),
 244            occurrence=seq_get(args, 3),
 245            group=exp.Literal.number(1) if group else default_group,
 246        )
 247
 248    return _builder
 249
 250
 251def _build_extract_json_with_default_path(expr_type: t.Type[E]) -> t.Callable[[t.List, Dialect], E]:
 252    def _builder(args: t.List, dialect: Dialect) -> E:
 253        if len(args) == 1:
 254            # The default value for the JSONPath is '$' i.e all of the data
 255            args.append(exp.Literal.string("$"))
 256        return parser.build_extract_json_with_path(expr_type)(args, dialect)
 257
 258    return _builder
 259
 260
 261def _str_to_datetime_sql(
 262    self: BigQuery.Generator, expression: exp.StrToDate | exp.StrToTime
 263) -> str:
 264    this = self.sql(expression, "this")
 265    dtype = "DATE" if isinstance(expression, exp.StrToDate) else "TIMESTAMP"
 266
 267    if expression.args.get("safe"):
 268        fmt = self.format_time(
 269            expression,
 270            self.dialect.INVERSE_FORMAT_MAPPING,
 271            self.dialect.INVERSE_FORMAT_TRIE,
 272        )
 273        return f"SAFE_CAST({this} AS {dtype} FORMAT {fmt})"
 274
 275    fmt = self.format_time(expression)
 276    return self.func(f"PARSE_{dtype}", fmt, this, expression.args.get("zone"))
 277
 278
 279def _annotate_math_functions(self: TypeAnnotator, expression: E) -> E:
 280    """
 281    Many BigQuery math functions such as CEIL, FLOOR etc follow this return type convention:
 282    +---------+---------+---------+------------+---------+
 283    |  INPUT  | INT64   | NUMERIC | BIGNUMERIC | FLOAT64 |
 284    +---------+---------+---------+------------+---------+
 285    |  OUTPUT | FLOAT64 | NUMERIC | BIGNUMERIC | FLOAT64 |
 286    +---------+---------+---------+------------+---------+
 287    """
 288    self._annotate_args(expression)
 289
 290    this: exp.Expression = expression.this
 291
 292    self._set_type(
 293        expression,
 294        exp.DataType.Type.DOUBLE if this.is_type(*exp.DataType.INTEGER_TYPES) else this.type,
 295    )
 296    return expression
 297
 298
 299@unsupported_args("ins_cost", "del_cost", "sub_cost")
 300def _levenshtein_sql(self: BigQuery.Generator, expression: exp.Levenshtein) -> str:
 301    max_dist = expression.args.get("max_dist")
 302    if max_dist:
 303        max_dist = exp.Kwarg(this=exp.var("max_distance"), expression=max_dist)
 304
 305    return self.func("EDIT_DISTANCE", expression.this, expression.expression, max_dist)
 306
 307
 308def _build_levenshtein(args: t.List) -> exp.Levenshtein:
 309    max_dist = seq_get(args, 2)
 310    return exp.Levenshtein(
 311        this=seq_get(args, 0),
 312        expression=seq_get(args, 1),
 313        max_dist=max_dist.expression if max_dist else None,
 314    )
 315
 316
 317def _build_format_time(expr_type: t.Type[exp.Expression]) -> t.Callable[[t.List], exp.TimeToStr]:
 318    def _builder(args: t.List) -> exp.TimeToStr:
 319        return exp.TimeToStr(
 320            this=expr_type(this=seq_get(args, 1)),
 321            format=seq_get(args, 0),
 322            zone=seq_get(args, 2),
 323        )
 324
 325    return _builder
 326
 327
 328def _build_contains_substring(args: t.List) -> exp.Contains | exp.Anonymous:
 329    if len(args) == 3:
 330        return exp.Anonymous(this="CONTAINS_SUBSTR", expressions=args)
 331
 332    # Lowercase the operands in case of transpilation, as exp.Contains
 333    # is case-sensitive on other dialects
 334    this = exp.Lower(this=seq_get(args, 0))
 335    expr = exp.Lower(this=seq_get(args, 1))
 336
 337    return exp.Contains(this=this, expression=expr)
 338
 339
 340def _json_extract_sql(self: BigQuery.Generator, expression: JSON_EXTRACT_TYPE) -> str:
 341    name = (expression._meta and expression.meta.get("name")) or expression.sql_name()
 342    upper = name.upper()
 343
 344    dquote_escaping = upper in DQUOTES_ESCAPING_JSON_FUNCTIONS
 345
 346    if dquote_escaping:
 347        self._quote_json_path_key_using_brackets = False
 348
 349    sql = rename_func(upper)(self, expression)
 350
 351    if dquote_escaping:
 352        self._quote_json_path_key_using_brackets = True
 353
 354    return sql
 355
 356
 357def _annotate_concat(self: TypeAnnotator, expression: exp.Concat) -> exp.Concat:
 358    annotated = self._annotate_by_args(expression, "expressions")
 359
 360    # Args must be BYTES or types that can be cast to STRING, return type is either BYTES or STRING
 361    # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#concat
 362    if not annotated.is_type(exp.DataType.Type.BINARY, exp.DataType.Type.UNKNOWN):
 363        annotated.type = exp.DataType.Type.VARCHAR
 364
 365    return annotated
 366
 367
 368def _annotate_array(self: TypeAnnotator, expression: exp.Array) -> exp.Array:
 369    array_args = expression.expressions
 370
 371    # BigQuery behaves as follows:
 372    #
 373    # SELECT t, TYPEOF(t) FROM (SELECT 'foo') AS t            -- foo, STRUCT<STRING>
 374    # SELECT ARRAY(SELECT 'foo'), TYPEOF(ARRAY(SELECT 'foo')) -- foo, ARRAY<STRING>
 375    if (
 376        len(array_args) == 1
 377        and isinstance(select := array_args[0].unnest(), exp.Select)
 378        and (query_type := select.meta.get("query_type")) is not None
 379        and query_type.is_type(exp.DataType.Type.STRUCT)
 380        and len(query_type.expressions) == 1
 381        and isinstance(col_def := query_type.expressions[0], exp.ColumnDef)
 382        and (projection_type := col_def.kind) is not None
 383        and not projection_type.is_type(exp.DataType.Type.UNKNOWN)
 384    ):
 385        array_type = exp.DataType(
 386            this=exp.DataType.Type.ARRAY,
 387            expressions=[projection_type.copy()],
 388            nested=True,
 389        )
 390        return self._annotate_with_type(expression, array_type)
 391
 392    return self._annotate_by_args(expression, "expressions", array=True)
 393
 394
class BigQuery(Dialect):
    """BigQuery dialect: dialect-level settings, format mappings and type annotators."""

    WEEK_OFFSET = -1
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    LOG_BASE_FIRST = False
    HEX_LOWERCASE = True
    FORCE_EARLY_ALIAS_REF_EXPANSION = True
    PRESERVE_ORIGINAL_NAMES = True
    HEX_STRING_IS_INTEGER_TYPE = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
        "%E6S": "%S.%f",
        "%e": "%-d",
    }

    # Oracle-style format elements mapped to strftime-style directives
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    # https://cloud.google.com/bigquery/docs/querying-wildcard-tables#scanning_a_range_of_tables_using_table_suffix
    # https://cloud.google.com/bigquery/docs/query-cloud-storage-data#query_the_file_name_pseudo-column
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE", "_TABLE_SUFFIX", "_FILE_NAME"}

    # All set operations require either a DISTINCT or ALL specifier
    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)

    # BigQuery maps Type.TIMESTAMP to DATETIME, so we need to amend the inferred types
    TYPE_TO_EXPRESSIONS = {
        **Dialect.TYPE_TO_EXPRESSIONS,
        exp.DataType.Type.TIMESTAMPTZ: Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.TIMESTAMP],
    }
    TYPE_TO_EXPRESSIONS.pop(exp.DataType.Type.TIMESTAMP)

    ANNOTATORS = {
        **Dialect.ANNOTATORS,
        # Expressions whose return type follows directly from TYPE_TO_EXPRESSIONS
        **{
            expr_type: annotate_with_type_lambda(data_type)
            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
            for expr_type in expressions
        },
        # Math functions returning FLOAT64 for integer inputs (see _annotate_math_functions)
        **{
            expr_type: lambda self, e: _annotate_math_functions(self, e)
            for expr_type in (exp.Floor, exp.Ceil, exp.Log, exp.Ln, exp.Sqrt, exp.Exp, exp.Round)
        },
        # Functions annotated with the type of their first argument
        **{
            expr_type: lambda self, e: self._annotate_by_args(e, "this")
            for expr_type in (
                exp.Left,
                exp.Right,
                exp.Lower,
                exp.Upper,
                exp.Pad,
                exp.Trim,
                exp.RegexpExtract,
                exp.RegexpReplace,
                exp.Repeat,
                exp.Substring,
            )
        },
        exp.Array: _annotate_array,
        exp.ArrayConcat: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.Ascii: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseAndAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseOrAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseXorAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseCountAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.Concat: _annotate_concat,
        exp.Corr: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CovarPop: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CovarSamp: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.JSONArray: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONExtractScalar: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.VARCHAR
        ),
        exp.JSONValueArray: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<VARCHAR>")
        ),
        exp.JSONType: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.Lag: lambda self, e: self._annotate_by_args(e, "this", "default"),
        exp.SHA: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.SHA2: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.Sign: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Split: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.TimestampFromParts: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.DATETIME
        ),
        exp.Unicode: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
    }
 506
    def normalize_identifier(self, expression: E) -> E:
        """Lowercase identifiers that BigQuery treats case-insensitively, leaving
        case-sensitive names (UDFs, qualified tables) untouched."""
        if (
            isinstance(expression, exp.Identifier)
            and self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
        ):
            # Walk up through dotted references to find the enclosing node
            parent = expression.parent
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
            # by default. The following check uses a heuristic to detect tables based on whether
            # they are qualified. This should generally be correct, because tables in BigQuery
            # must be qualified with at least a dataset, unless @@dataset_id is set.
            case_sensitive = (
                isinstance(parent, exp.UserDefinedFunction)
                or (
                    isinstance(parent, exp.Table)
                    and parent.db
                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
                )
                or expression.meta.get("is_table")
            )
            if not case_sensitive:
                expression.set("this", expression.this.lower())

            return t.cast(E, expression)

        return super().normalize_identifier(expression)
 535
    class Tokenizer(tokens.Tokenizer):
        """BigQuery tokenizer: quote styles, comment markers and keyword overrides."""

        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b/B-prefixed byte string literals for every quote style
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r/R-prefixed raw string literals for every quote style
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.DECLARE,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "EXPORT": TokenType.EXPORT,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        # Base-tokenizer keywords that must not be treated as keywords in BigQuery
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
 577
    class Parser(parser.Parser):
        """BigQuery parser: function builders and token-set overrides."""

        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True
        JOINS_HAVE_EQUAL_PRECEDENCE = True

        # BigQuery does not allow ASC/DESC to be used as an identifier
        ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
        ALIAS_TOKENS = parser.Parser.ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        COMMENT_TABLE_ALIAS_TOKENS = parser.Parser.COMMENT_TABLE_ALIAS_TOKENS - {
            TokenType.ASC,
            TokenType.DESC,
        }
        UPDATE_ALIAS_TOKENS = parser.Parser.UPDATE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONTAINS_SUBSTR": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=seq_get(args, 1),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _build_to_hex,
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "JSON_ARRAY": lambda self: self.expression(
                exp.JSONArray, expressions=self._parse_csv(self._parse_bitwise)
            ),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        # Tokens that may legally follow a dashed table-name part (see _parse_table_part)
        DASHED_TABLE_PART_FOLLOW_TOKENS = {TokenType.DOT, TokenType.L_PAREN, TokenType.R_PAREN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
            TokenType.EXPORT: lambda self: self._parse_export_data(),
            TokenType.DECLARE: lambda self: self._parse_declare(),
        }

        # Array subscript keywords mapped to (base index, is-SAFE variant)
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }
 713
 714        def _parse_for_in(self) -> exp.ForIn:
 715            this = self._parse_range()
 716            self._match_text_seq("DO")
 717            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
 718
        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one dotted table-name part, handling dashed and numeric parts.

            https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            """
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume `-part` runs (e.g. proj-1) until a token that may follow
                # a dashed part is reached
                while self._match(TokenType.DASH, advance=False) and self._next:
                    start = self._curr
                    while self._is_connected() and not self._match_set(
                        self.DASHED_TABLE_PART_FOLLOW_TOKENS, advance=False
                    ):
                        self._advance()

                    # No progress was made; bail out to avoid looping forever
                    if start == self._curr:
                        break

                    table_name += self._find_sql(start, self._prev)

                this = exp.Identifier(
                    this=table_name, quoted=this.args.get("quoted")
                ).update_positions(this)
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # An adjacent var continues a numeric part of the table name
                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True).update_positions(this)

            return this
 749
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a full table path into an ``exp.Table``.

            Handles three BigQuery quirks on top of the base parser: numeric parts
            tokenized as floats (proj-1.db.tbl), quoted identifiers containing
            dots, and INFORMATION_SCHEMA views that must keep their prefix fused
            into a single identifier.
            """
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    previous_db = table.args["db"]
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set(
                            "catalog", exp.Identifier(this=parts[0]).update_positions(previous_db)
                        )
                        table.set("db", exp.Identifier(this=parts[1]).update_positions(previous_db))
                else:
                    previous_this = table.this
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set(
                            "db", exp.Identifier(this=parts[0]).update_positions(previous_this)
                        )
                        table.set(
                            "this", exp.Identifier(this=parts[1]).update_positions(previous_this)
                        )

            # A quoted part like `db.tbl` must be re-split into its real components
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                for part in (catalog, db, this):
                    if part:
                        part.update_positions(table.this)

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                new_this = exp.Identifier(this=info_schema_view, quoted=True).update_positions(
                    line=table_parts[-2].meta.get("line"),
                    col=table_parts[-1].meta.get("col"),
                    start=table_parts[-2].meta.get("start"),
                    end=table_parts[-1].meta.get("end"),
                )
                table.set("this", new_this)
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table
 834
 835        def _parse_column(self) -> t.Optional[exp.Expression]:
 836            column = super()._parse_column()
 837            if isinstance(column, exp.Column):
 838                parts = column.parts
 839                if any("." in p.name for p in parts):
 840                    catalog, db, table, this, *rest = (
 841                        exp.to_identifier(p, quoted=True)
 842                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
 843                    )
 844
 845                    if rest and this:
 846                        this = exp.Dot.build([this, *rest])  # type: ignore
 847
 848                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
 849                    column.meta["quoted_column"] = True
 850
 851            return column
 852
        @t.overload
        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

        @t.overload
        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

        def _parse_json_object(self, agg=False):
            """Parse JSON_OBJECT, normalizing BigQuery's key/value-array signature."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                # e.g. JSON_OBJECT(['a', 'b'], [1, 2]) -> pair keys[i] with values[i]
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object
 879
 880        def _parse_bracket(
 881            self, this: t.Optional[exp.Expression] = None
 882        ) -> t.Optional[exp.Expression]:
 883            bracket = super()._parse_bracket(this)
 884
 885            if this is bracket:
 886                return bracket
 887
 888            if isinstance(bracket, exp.Bracket):
 889                for expression in bracket.expressions:
 890                    name = expression.name.upper()
 891
 892                    if name not in self.BRACKET_OFFSETS:
 893                        break
 894
 895                    offset, safe = self.BRACKET_OFFSETS[name]
 896                    bracket.set("offset", offset)
 897                    bracket.set("safe", safe)
 898                    expression.replace(expression.expressions[0])
 899
 900            return bracket
 901
        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
            """Parse UNNEST, flagging arrays of structs for explosion."""
            unnest = super()._parse_unnest(with_alias=with_alias)

            if not unnest:
                return None

            unnest_expr = seq_get(unnest.expressions, 0)
            if unnest_expr:
                # Local import avoids a circular dependency with the optimizer
                from sqlglot.optimizer.annotate_types import annotate_types

                unnest_expr = annotate_types(unnest_expr, dialect=self.dialect)

                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
                # in contrast to other dialects such as DuckDB which flattens only the array by default
                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
                    array_elem.is_type(exp.DataType.Type.STRUCT)
                    for array_elem in unnest_expr._type.expressions
                ):
                    unnest.set("explode_array", True)

            return unnest
 923
 924        def _parse_make_interval(self) -> exp.MakeInterval:
 925            expr = exp.MakeInterval()
 926
 927            for arg_key in expr.arg_types:
 928                value = self._parse_lambda()
 929
 930                if not value:
 931                    break
 932
 933                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
 934                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
 935                if isinstance(value, exp.Kwarg):
 936                    arg_key = value.this.name
 937
 938                expr.set(arg_key, value)
 939
 940                self._match(TokenType.COMMA)
 941
 942            return expr
 943
 944        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
 945            expr = self.expression(
 946                exp.FeaturesAtTime,
 947                this=(self._match(TokenType.TABLE) and self._parse_table())
 948                or self._parse_select(nested=True),
 949            )
 950
 951            while self._match(TokenType.COMMA):
 952                arg = self._parse_lambda()
 953
 954                # Get the LHS of the Kwarg and set the arg to that value, e.g
 955                # "num_rows => 1" sets the expr's `num_rows` arg
 956                if arg:
 957                    expr.set(arg.this.name, arg)
 958
 959            return expr
 960
 961        def _parse_export_data(self) -> exp.Export:
 962            self._match_text_seq("DATA")
 963
 964            return self.expression(
 965                exp.Export,
 966                connection=self._match_text_seq("WITH", "CONNECTION") and self._parse_table_parts(),
 967                options=self._parse_properties(),
 968                this=self._match_text_seq("AS") and self._parse_select(),
 969            )
 970
    class Generator(generator.Generator):
        """Generates BigQuery (GoogleSQL) syntax from sqlglot's AST."""

        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        SUPPORTS_TABLE_ALIAS_COLUMNS = False
        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
        JSON_KEY_VALUE_PAIR_SEP = ","
        NULL_ORDERING_SUPPORTED = False
        IGNORE_NULLS_IN_FUNC = True
        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTS_TO_NUMBER = False
        # Named query parameters are rendered as @param
        NAMED_PLACEHOLDER_TOKEN = "@"
        HEX_FUNC = "TO_HEX"
        # Table/column properties are emitted in an OPTIONS (...) clause
        WITH_PROPERTIES_PREFIX = "OPTIONS"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_UNIX_SECONDS = True
 996
        # Per-expression SQL rendering overrides for BigQuery, layered on the
        # base generator's defaults
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
            exp.Array: inline_array_unless_query,
            exp.ArrayContains: _array_contains_sql,
            exp.ArrayFilter: filter_array_using_unnest,
            exp.ArrayRemove: filter_array_using_unnest,
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.CollateProperty: lambda self, e: (
                f"DEFAULT COLLATE {self.sql(e, 'this')}"
                if e.args.get("default")
                else f"COLLATE {self.sql(e, 'this')}"
            ),
            exp.Commit: lambda *_: "COMMIT TRANSACTION",
            exp.CountIf: rename_func("COUNTIF"),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: self.func(
                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
            ),
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.FromTimeZone: lambda self, e: self.func(
                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
            ),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: lambda self, e: groupconcat_sql(
                self, e, func_name="STRING_AGG", within_group=False
            ),
            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
            exp.HexString: lambda self, e: self.hexstring_sql(e, binary_function_repr="FROM_HEX"),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.Int64: rename_func("INT64"),
            exp.JSONExtract: _json_extract_sql,
            exp.JSONExtractArray: _json_extract_sql,
            exp.JSONExtractScalar: _json_extract_sql,
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Levenshtein: _levenshtein_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpExtractAll: lambda self, e: self.func(
                "REGEXP_EXTRACT_ALL", e.this, e.expression
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_projection_to_unnest(),
                    transforms.unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.SHA2: sha256_sql,
            exp.Space: space_sql,
            exp.StabilityProperty: lambda self, e: (
                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
            ),
            exp.String: rename_func("STRING"),
            exp.StrPosition: lambda self, e: (
                strposition_sql(
                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
                )
            ),
            exp.StrToDate: _str_to_datetime_sql,
            exp.StrToTime: _str_to_datetime_sql,
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeFromParts: rename_func("TIME"),
            exp.TimestampFromParts: rename_func("DATETIME"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
            exp.TsOrDsAdd: _ts_or_ds_add_sql,
            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
            exp.TsOrDsToTime: rename_func("TIME"),
            exp.TsOrDsToDatetime: rename_func("DATETIME"),
            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.UnixDate: rename_func("UNIX_DATE"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.Uuid: lambda *_: "GENERATE_UUID()",
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
            exp.SafeDivide: rename_func("SAFE_DIVIDE"),
        }
1107
        # JSON path components BigQuery's JSON functions can express; the base
        # generator reports anything else as unsupported
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Canonical type names -> BigQuery spellings (e.g. BIGINT -> INT64)
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BLOB: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPNTZ: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.ROWVERSION: "BYTES",
            exp.DataType.Type.UUID: "STRING",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        # Where each property may appear within a generated CREATE statement
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # WINDOW comes after QUALIFY
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
        AFTER_HAVING_MODIFIER_TRANSFORMS = {
            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
        }
1154
        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        # NOTE(review): presumably consulted by the base generator to decide when
        # identifiers must be quoted -- verify against generator.Generator
        RESERVED_KEYWORDS = {
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }
1254
1255        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1256            unit = expression.unit
1257            unit_sql = unit.name if unit.is_string else self.sql(unit)
1258            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
1259
1260        def mod_sql(self, expression: exp.Mod) -> str:
1261            this = expression.this
1262            expr = expression.expression
1263            return self.func(
1264                "MOD",
1265                this.unnest() if isinstance(this, exp.Paren) else this,
1266                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1267            )
1268
1269        def column_parts(self, expression: exp.Column) -> str:
1270            if expression.meta.get("quoted_column"):
1271                # If a column reference is of the form `dataset.table`.name, we need
1272                # to preserve the quoted table path, otherwise the reference breaks
1273                table_parts = ".".join(p.name for p in expression.parts[:-1])
1274                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1275                return f"{table_path}.{self.sql(expression, 'this')}"
1276
1277            return super().column_parts(expression)
1278
1279        def table_parts(self, expression: exp.Table) -> str:
1280            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1281            # we need to make sure the correct quoting is used in each case.
1282            #
1283            # For example, if there is a CTE x that clashes with a schema name, then the former will
1284            # return the table y in that schema, whereas the latter will return the CTE's y column:
1285            #
1286            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1287            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1288            if expression.meta.get("quoted_table"):
1289                table_parts = ".".join(p.name for p in expression.parts)
1290                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1291
1292            return super().table_parts(expression)
1293
1294        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1295            this = expression.this
1296            if isinstance(this, exp.TsOrDsToDatetime):
1297                func_name = "FORMAT_DATETIME"
1298            elif isinstance(this, exp.TsOrDsToTimestamp):
1299                func_name = "FORMAT_TIMESTAMP"
1300            else:
1301                func_name = "FORMAT_DATE"
1302
1303            time_expr = (
1304                this
1305                if isinstance(this, (exp.TsOrDsToDatetime, exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
1306                else expression
1307            )
1308            return self.func(
1309                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1310            )
1311
1312        def eq_sql(self, expression: exp.EQ) -> str:
1313            # Operands of = cannot be NULL in BigQuery
1314            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1315                if not isinstance(expression.parent, exp.Update):
1316                    return "NULL"
1317
1318            return self.binary(expression, "=")
1319
1320        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1321            parent = expression.parent
1322
1323            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1324            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1325            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1326                return self.func(
1327                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1328                )
1329
1330            return super().attimezone_sql(expression)
1331
        def trycast_sql(self, expression: exp.TryCast) -> str:
            # TRY_CAST is spelled SAFE_CAST in BigQuery
            return self.cast_sql(expression, safe_prefix="SAFE_")
1334
        def bracket_sql(self, expression: exp.Bracket) -> str:
            """Render subscript access using BigQuery's OFFSET/ORDINAL wrappers."""
            this = expression.this
            expressions = expression.expressions

            # Struct access by string key must use dot notation, not brackets
            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
                arg = expressions[0]
                if arg.type is None:
                    from sqlglot.optimizer.annotate_types import annotate_types

                    arg = annotate_types(arg, dialect=self.dialect)

                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
                    # BQ doesn't support bracket syntax with string values for structs
                    return f"{self.sql(this)}.{arg.name}"

            expressions_sql = self.expressions(expression, flat=True)
            offset = expression.args.get("offset")

            # offset 0 -> zero-based OFFSET(), offset 1 -> one-based ORDINAL()
            if offset == 0:
                expressions_sql = f"OFFSET({expressions_sql})"
            elif offset == 1:
                expressions_sql = f"ORDINAL({expressions_sql})"
            elif offset is not None:
                self.unsupported(f"Unsupported array offset: {offset}")

            # "safe" marks the out-of-bounds-tolerant SAFE_OFFSET/SAFE_ORDINAL forms
            if expression.args.get("safe"):
                expressions_sql = f"SAFE_{expressions_sql}"

            return f"{self.sql(this)}[{expressions_sql}]"
1364
        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # `x IN UNNEST(y)` takes the UNNEST call as-is, without extra parens
            return self.sql(expression)
1367
        def version_sql(self, expression: exp.Version) -> str:
            # FOR TIMESTAMP AS OF ... is spelled FOR SYSTEM_TIME AS OF in BigQuery;
            # note this mutates the node in place before delegating
            if expression.name == "TIMESTAMP":
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
1372
1373        def contains_sql(self, expression: exp.Contains) -> str:
1374            this = expression.this
1375            expr = expression.expression
1376
1377            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1378                this = this.this
1379                expr = expr.this
1380
1381            return self.func("CONTAINS_SUBSTR", this, expr)
1382
1383        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1384            this = expression.this
1385
1386            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
1387            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
1388            # because they aren't literals and so the above syntax is invalid BigQuery.
1389            if isinstance(this, exp.Array):
1390                elem = seq_get(this.expressions, 0)
1391                if not (elem and elem.find(exp.Query)):
1392                    return f"{self.sql(expression, 'to')}{self.sql(this)}"
1393
1394            return super().cast_sql(expression, safe_prefix=safe_prefix)
1395
1396        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1397            variables = self.expressions(expression, "this")
1398            default = self.sql(expression, "default")
1399            default = f" DEFAULT {default}" if default else ""
1400            kind = self.sql(expression, "kind")
1401            kind = f" {kind}" if kind else ""
1402
1403            return f"{variables}{kind}{default}"
logger = logging.getLogger("sqlglot")
DQUOTES_ESCAPING_JSON_FUNCTIONS = ("JSON_QUERY", "JSON_VALUE", "JSON_QUERY_ARRAY")
class BigQuery(sqlglot.dialects.dialect.Dialect):
class BigQuery(Dialect):
    """Dialect settings and expression-type annotators for Google BigQuery (GoogleSQL)."""

    # NOTE(review): -1 presumably shifts the first day of week relative to the
    # default dialect behavior — confirm against Dialect.WEEK_OFFSET semantics.
    WEEK_OFFSET = -1
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    LOG_BASE_FIRST = False
    HEX_LOWERCASE = True
    FORCE_EARLY_ALIAS_REF_EXPANSION = True
    PRESERVE_ORIGINAL_NAMES = True
    HEX_STRING_IS_INTEGER_TYPE = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/format-elements#format_elements_date_time
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
        "%E6S": "%S.%f",
        "%e": "%-d",
    }

    # BigQuery format-element names -> strftime-style directives
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    # https://cloud.google.com/bigquery/docs/querying-wildcard-tables#scanning_a_range_of_tables_using_table_suffix
    # https://cloud.google.com/bigquery/docs/query-cloud-storage-data#query_the_file_name_pseudo-column
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE", "_TABLE_SUFFIX", "_FILE_NAME"}

    # All set operations require either a DISTINCT or ALL specifier
    SET_OP_DISTINCT_BY_DEFAULT = dict.fromkeys((exp.Except, exp.Intersect, exp.Union), None)

    # BigQuery maps Type.TIMESTAMP to DATETIME, so we need to amend the inferred types
    TYPE_TO_EXPRESSIONS = {
        **Dialect.TYPE_TO_EXPRESSIONS,
        exp.DataType.Type.TIMESTAMPTZ: Dialect.TYPE_TO_EXPRESSIONS[exp.DataType.Type.TIMESTAMP],
    }
    TYPE_TO_EXPRESSIONS.pop(exp.DataType.Type.TIMESTAMP)

    ANNOTATORS = {
        **Dialect.ANNOTATORS,
        # Map each expression class to the fixed result type implied above.
        **{
            expr_type: annotate_with_type_lambda(data_type)
            for data_type, expressions in TYPE_TO_EXPRESSIONS.items()
            for expr_type in expressions
        },
        # Math functions whose result type depends on their argument types.
        **{
            expr_type: lambda self, e: _annotate_math_functions(self, e)
            for expr_type in (exp.Floor, exp.Ceil, exp.Log, exp.Ln, exp.Sqrt, exp.Exp, exp.Round)
        },
        # Functions whose result type mirrors their first argument's type.
        **{
            expr_type: lambda self, e: self._annotate_by_args(e, "this")
            for expr_type in (
                exp.Left,
                exp.Right,
                exp.Lower,
                exp.Upper,
                exp.Pad,
                exp.Trim,
                exp.RegexpExtract,
                exp.RegexpReplace,
                exp.Repeat,
                exp.Substring,
            )
        },
        exp.Array: _annotate_array,
        exp.ArrayConcat: lambda self, e: self._annotate_by_args(e, "this", "expressions"),
        exp.Ascii: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseAndAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseOrAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseXorAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.BitwiseCountAgg: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
        exp.Concat: _annotate_concat,
        exp.Corr: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CovarPop: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.CovarSamp: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.DOUBLE),
        exp.JSONArray: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.JSON),
        exp.JSONExtractScalar: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.VARCHAR
        ),
        exp.JSONValueArray: lambda self, e: self._annotate_with_type(
            e, exp.DataType.build("ARRAY<VARCHAR>")
        ),
        exp.JSONType: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.VARCHAR),
        exp.Lag: lambda self, e: self._annotate_by_args(e, "this", "default"),
        exp.SHA: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.SHA2: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BINARY),
        exp.Sign: lambda self, e: self._annotate_by_args(e, "this"),
        exp.Split: lambda self, e: self._annotate_by_args(e, "this", array=True),
        exp.TimestampFromParts: lambda self, e: self._annotate_with_type(
            e, exp.DataType.Type.DATETIME
        ),
        exp.Unicode: lambda self, e: self._annotate_with_type(e, exp.DataType.Type.BIGINT),
    }
 507
 508    def normalize_identifier(self, expression: E) -> E:
 509        if (
 510            isinstance(expression, exp.Identifier)
 511            and self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
 512        ):
 513            parent = expression.parent
 514            while isinstance(parent, exp.Dot):
 515                parent = parent.parent
 516
 517            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
 518            # by default. The following check uses a heuristic to detect tables based on whether
 519            # they are qualified. This should generally be correct, because tables in BigQuery
 520            # must be qualified with at least a dataset, unless @@dataset_id is set.
 521            case_sensitive = (
 522                isinstance(parent, exp.UserDefinedFunction)
 523                or (
 524                    isinstance(parent, exp.Table)
 525                    and parent.db
 526                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
 527                )
 528                or expression.meta.get("is_table")
 529            )
 530            if not case_sensitive:
 531                expression.set("this", expression.this.lower())
 532
 533            return t.cast(E, expression)
 534
 535        return super().normalize_identifier(expression)
 536
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer overrides implementing BigQuery's lexical rules."""

        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." prefixed strings (any quote style) are BYTES literals.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"..." prefixed strings are raw strings (no escape processing).
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTEINT": TokenType.INT,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DATETIME": TokenType.TIMESTAMP,
            "DECLARE": TokenType.DECLARE,
            "ELSEIF": TokenType.COMMAND,
            "EXCEPTION": TokenType.COMMAND,
            "EXPORT": TokenType.EXPORT,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        # Drop base keywords that BigQuery handles differently: DIV is parsed
        # as a function here (see Parser.FUNCTIONS), and VALUES / the "/*+"
        # hint opener are not tokenized as keywords in this dialect.
        KEYWORDS.pop("DIV")
        KEYWORDS.pop("VALUES")
        KEYWORDS.pop("/*+")
 578
    class Parser(parser.Parser):
        """Parser overrides for BigQuery-specific syntax and function signatures."""

        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True
        JOINS_HAVE_EQUAL_PRECEDENCE = True

        # BigQuery does not allow ASC/DESC to be used as an identifier
        ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
        ALIAS_TOKENS = parser.Parser.ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        COMMENT_TABLE_ALIAS_TOKENS = parser.Parser.COMMENT_TABLE_ALIAS_TOKENS - {
            TokenType.ASC,
            TokenType.DESC,
        }
        UPDATE_ALIAS_TOKENS = parser.Parser.UPDATE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}

        # BigQuery function names -> builders producing canonical sqlglot expressions.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONTAINS_SUBSTR": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=seq_get(args, 1),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _build_to_hex,
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "JSON_ARRAY": lambda self: self.expression(
                exp.JSONArray, expressions=self._parse_csv(self._parse_bitwise)
            ),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
        }
        # TRIM is parsed as a regular function in this dialect.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        # Tokens that terminate a dash-separated table-name part (see _parse_table_part).
        DASHED_TABLE_PART_FOLLOW_TOKENS = {TokenType.DOT, TokenType.L_PAREN, TokenType.R_PAREN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
            TokenType.EXPORT: lambda self: self._parse_export_data(),
            TokenType.DECLARE: lambda self: self._parse_declare(),
        }

        # Array subscript operator name -> (index offset, SAFE_ variant flag).
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }

        def _parse_for_in(self) -> exp.ForIn:
            """Parse the procedural `FOR <expr> IN ... DO <statement>` construct."""
            this = self._parse_range()
            self._match_text_seq("DO")
            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, gluing dash-separated pieces (e.g. proj-1)
            back into a single identifier."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    start = self._curr
                    # Consume tokens until a part separator/terminator is reached.
                    while self._is_connected() and not self._match_set(
                        self.DASHED_TABLE_PART_FOLLOW_TOKENS, advance=False
                    ):
                        self._advance()

                    if start == self._curr:
                        break

                    table_name += self._find_sql(start, self._prev)

                this = exp.Identifier(
                    this=table_name, quoted=this.args.get("quoted")
                ).update_positions(this)
            elif isinstance(this, exp.Literal):
                # A leading numeric part (e.g. `123abc`) is tokenized as a literal.
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True).update_positions(this)

            return this

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, fixing up dotted parts hidden inside
            floats/identifiers and normalizing INFORMATION_SCHEMA references."""
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    previous_db = table.args["db"]
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set(
                            "catalog", exp.Identifier(this=parts[0]).update_positions(previous_db)
                        )
                        table.set("db", exp.Identifier(this=parts[1]).update_positions(previous_db))
                else:
                    previous_this = table.this
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set(
                            "db", exp.Identifier(this=parts[0]).update_positions(previous_this)
                        )
                        table.set(
                            "this", exp.Identifier(this=parts[1]).update_positions(previous_this)
                        )

            # A quoted part may itself contain dots, e.g. `proj.db.tbl`; re-split
            # the full name into catalog/db/this.
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                for part in (catalog, db, this):
                    if part:
                        part.update_positions(table.this)

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                new_this = exp.Identifier(this=info_schema_view, quoted=True).update_positions(
                    line=table_parts[-2].meta.get("line"),
                    col=table_parts[-1].meta.get("col"),
                    start=table_parts[-2].meta.get("start"),
                    end=table_parts[-1].meta.get("end"),
                )
                table.set("this", new_this)
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table

        def _parse_column(self) -> t.Optional[exp.Expression]:
            """Parse a column, re-splitting quoted multi-part references like
            `a.b.c` into catalog/db/table/column identifiers."""
            column = super()._parse_column()
            if isinstance(column, exp.Column):
                parts = column.parts
                if any("." in p.name for p in parts):
                    catalog, db, table, this, *rest = (
                        exp.to_identifier(p, quoted=True)
                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
                    )

                    if rest and this:
                        this = exp.Dot.build([this, *rest])  # type: ignore

                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
                    column.meta["quoted_column"] = True

            return column

        @t.overload
        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

        @t.overload
        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

        def _parse_json_object(self, agg=False):
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

        def _parse_bracket(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Unwrap BigQuery array subscript operators (OFFSET/ORDINAL and their
            SAFE_ variants) into the Bracket node's `offset`/`safe` args."""
            bracket = super()._parse_bracket(this)

            if this is bracket:
                return bracket

            if isinstance(bracket, exp.Bracket):
                for expression in bracket.expressions:
                    name = expression.name.upper()

                    if name not in self.BRACKET_OFFSETS:
                        break

                    offset, safe = self.BRACKET_OFFSETS[name]
                    bracket.set("offset", offset)
                    bracket.set("safe", safe)
                    # Replace e.g. OFFSET(i) with the bare index expression i.
                    expression.replace(expression.expressions[0])

            return bracket

        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
            """Parse UNNEST, flagging arrays of structs so their fields explode."""
            unnest = super()._parse_unnest(with_alias=with_alias)

            if not unnest:
                return None

            unnest_expr = seq_get(unnest.expressions, 0)
            if unnest_expr:
                from sqlglot.optimizer.annotate_types import annotate_types

                unnest_expr = annotate_types(unnest_expr, dialect=self.dialect)

                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
                # in contrast to other dialects such as DuckDB which flattens only the array by default
                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
                    array_elem.is_type(exp.DataType.Type.STRUCT)
                    for array_elem in unnest_expr._type.expressions
                ):
                    unnest.set("explode_array", True)

            return unnest

        def _parse_make_interval(self) -> exp.MakeInterval:
            """Parse MAKE_INTERVAL's mix of positional and named (kwarg) arguments."""
            expr = exp.MakeInterval()

            for arg_key in expr.arg_types:
                value = self._parse_lambda()

                if not value:
                    break

                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
                if isinstance(value, exp.Kwarg):
                    arg_key = value.this.name

                expr.set(arg_key, value)

                self._match(TokenType.COMMA)

            return expr

        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
            """Parse ML.FEATURES_AT_TIME's TABLE/subquery argument plus kwargs."""
            expr = self.expression(
                exp.FeaturesAtTime,
                this=(self._match(TokenType.TABLE) and self._parse_table())
                or self._parse_select(nested=True),
            )

            while self._match(TokenType.COMMA):
                arg = self._parse_lambda()

                # Get the LHS of the Kwarg and set the arg to that value, e.g
                # "num_rows => 1" sets the expr's `num_rows` arg
                if arg:
                    expr.set(arg.this.name, arg)

            return expr

        def _parse_export_data(self) -> exp.Export:
            """Parse `EXPORT DATA [WITH CONNECTION <conn>] <options> AS <select>`."""
            self._match_text_seq("DATA")

            return self.expression(
                exp.Export,
                connection=self._match_text_seq("WITH", "CONNECTION") and self._parse_table_parts(),
                options=self._parse_properties(),
                this=self._match_text_seq("AS") and self._parse_select(),
            )
 971
    class Generator(generator.Generator):
        """SQL generation settings for the BigQuery dialect."""

        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        SUPPORTS_TABLE_ALIAS_COLUMNS = False
        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
        JSON_KEY_VALUE_PAIR_SEP = ","
        NULL_ORDERING_SUPPORTED = False
        IGNORE_NULLS_IN_FUNC = True
        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
        CAN_IMPLEMENT_ARRAY_ANY = True
        SUPPORTS_TO_NUMBER = False
        NAMED_PLACEHOLDER_TOKEN = "@"
        HEX_FUNC = "TO_HEX"
        WITH_PROPERTIES_PREFIX = "OPTIONS"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_UNIX_SECONDS = True
 997
 998        TRANSFORMS = {
 999            **generator.Generator.TRANSFORMS,
1000            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
1001            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
1002            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
1003            exp.Array: inline_array_unless_query,
1004            exp.ArrayContains: _array_contains_sql,
1005            exp.ArrayFilter: filter_array_using_unnest,
1006            exp.ArrayRemove: filter_array_using_unnest,
1007            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
1008            exp.CollateProperty: lambda self, e: (
1009                f"DEFAULT COLLATE {self.sql(e, 'this')}"
1010                if e.args.get("default")
1011                else f"COLLATE {self.sql(e, 'this')}"
1012            ),
1013            exp.Commit: lambda *_: "COMMIT TRANSACTION",
1014            exp.CountIf: rename_func("COUNTIF"),
1015            exp.Create: _create_sql,
1016            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
1017            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
1018            exp.DateDiff: lambda self, e: self.func(
1019                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
1020            ),
1021            exp.DateFromParts: rename_func("DATE"),
1022            exp.DateStrToDate: datestrtodate_sql,
1023            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
1024            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
1025            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
1026            exp.FromTimeZone: lambda self, e: self.func(
1027                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
1028            ),
1029            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
1030            exp.GroupConcat: lambda self, e: groupconcat_sql(
1031                self, e, func_name="STRING_AGG", within_group=False
1032            ),
1033            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
1034            exp.HexString: lambda self, e: self.hexstring_sql(e, binary_function_repr="FROM_HEX"),
1035            exp.If: if_sql(false_value="NULL"),
1036            exp.ILike: no_ilike_sql,
1037            exp.IntDiv: rename_func("DIV"),
1038            exp.Int64: rename_func("INT64"),
1039            exp.JSONExtract: _json_extract_sql,
1040            exp.JSONExtractArray: _json_extract_sql,
1041            exp.JSONExtractScalar: _json_extract_sql,
1042            exp.JSONFormat: rename_func("TO_JSON_STRING"),
1043            exp.Levenshtein: _levenshtein_sql,
1044            exp.Max: max_or_greatest,
1045            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
1046            exp.MD5Digest: rename_func("MD5"),
1047            exp.Min: min_or_least,
1048            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
1049            exp.RegexpExtract: lambda self, e: self.func(
1050                "REGEXP_EXTRACT",
1051                e.this,
1052                e.expression,
1053                e.args.get("position"),
1054                e.args.get("occurrence"),
1055            ),
1056            exp.RegexpExtractAll: lambda self, e: self.func(
1057                "REGEXP_EXTRACT_ALL", e.this, e.expression
1058            ),
1059            exp.RegexpReplace: regexp_replace_sql,
1060            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
1061            exp.ReturnsProperty: _returnsproperty_sql,
1062            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
1063            exp.Select: transforms.preprocess(
1064                [
1065                    transforms.explode_projection_to_unnest(),
1066                    transforms.unqualify_unnest,
1067                    transforms.eliminate_distinct_on,
1068                    _alias_ordered_group,
1069                    transforms.eliminate_semi_and_anti_joins,
1070                ]
1071            ),
1072            exp.SHA: rename_func("SHA1"),
1073            exp.SHA2: sha256_sql,
1074            exp.Space: space_sql,
1075            exp.StabilityProperty: lambda self, e: (
1076                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
1077            ),
1078            exp.String: rename_func("STRING"),
1079            exp.StrPosition: lambda self, e: (
1080                strposition_sql(
1081                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
1082                )
1083            ),
1084            exp.StrToDate: _str_to_datetime_sql,
1085            exp.StrToTime: _str_to_datetime_sql,
1086            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
1087            exp.TimeFromParts: rename_func("TIME"),
1088            exp.TimestampFromParts: rename_func("DATETIME"),
1089            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
1090            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
1091            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
1092            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
1093            exp.TimeStrToTime: timestrtotime_sql,
1094            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
1095            exp.TsOrDsAdd: _ts_or_ds_add_sql,
1096            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
1097            exp.TsOrDsToTime: rename_func("TIME"),
1098            exp.TsOrDsToDatetime: rename_func("DATETIME"),
1099            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
1100            exp.Unhex: rename_func("FROM_HEX"),
1101            exp.UnixDate: rename_func("UNIX_DATE"),
1102            exp.UnixToTime: _unix_to_time_sql,
1103            exp.Uuid: lambda *_: "GENERATE_UUID()",
1104            exp.Values: _derived_table_values_to_unnest,
1105            exp.VariancePop: rename_func("VAR_POP"),
1106            exp.SafeDivide: rename_func("SAFE_DIVIDE"),
1107        }
1108
1109        SUPPORTED_JSON_PATH_PARTS = {  # JSONPath node types that can be rendered for BigQuery
1110            exp.JSONPathKey,
1111            exp.JSONPathRoot,
1112            exp.JSONPathSubscript,
1113        }
1114
1115        TYPE_MAPPING = {  # how sqlglot data types are spelled in BigQuery
1116            **generator.Generator.TYPE_MAPPING,
1117            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
1118            exp.DataType.Type.BIGINT: "INT64",
1119            exp.DataType.Type.BINARY: "BYTES",
1120            exp.DataType.Type.BLOB: "BYTES",
1121            exp.DataType.Type.BOOLEAN: "BOOL",
1122            exp.DataType.Type.CHAR: "STRING",
1123            exp.DataType.Type.DECIMAL: "NUMERIC",
1124            exp.DataType.Type.DOUBLE: "FLOAT64",
1125            exp.DataType.Type.FLOAT: "FLOAT64",
1126            exp.DataType.Type.INT: "INT64",
1127            exp.DataType.Type.NCHAR: "STRING",
1128            exp.DataType.Type.NVARCHAR: "STRING",
1129            exp.DataType.Type.SMALLINT: "INT64",
1130            exp.DataType.Type.TEXT: "STRING",
                # NOTE(review): naive TIMESTAMP maps to DATETIME while the tz-aware variants map
                # to BigQuery's TIMESTAMP — presumably because BigQuery's TIMESTAMP is tz-aware
1131            exp.DataType.Type.TIMESTAMP: "DATETIME",
1132            exp.DataType.Type.TIMESTAMPNTZ: "DATETIME",
1133            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
1134            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
1135            exp.DataType.Type.TINYINT: "INT64",
1136            exp.DataType.Type.ROWVERSION: "BYTES",
1137            exp.DataType.Type.UUID: "STRING",
1138            exp.DataType.Type.VARBINARY: "BYTES",
1139            exp.DataType.Type.VARCHAR: "STRING",
1140            exp.DataType.Type.VARIANT: "ANY TYPE",
1141        }
1142
1143        PROPERTIES_LOCATION = {
1144            **generator.Generator.PROPERTIES_LOCATION,
1145            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,  # PARTITION BY goes after the column schema
1146            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,  # VOLATILE is not generated for BigQuery
1147        }
1148
1149        # WINDOW comes after QUALIFY
1150        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
            # Re-declared so dict insertion order emits "qualify" before "windows".
1151        AFTER_HAVING_MODIFIER_TRANSFORMS = {
1152            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
1153            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
1154        }
1155
1156        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
            # NOTE(review): presumably consulted to decide when identifiers must be quoted —
            # confirm against how the base Generator uses RESERVED_KEYWORDS
1157        RESERVED_KEYWORDS = {
1158            "all",
1159            "and",
1160            "any",
1161            "array",
1162            "as",
1163            "asc",
1164            "assert_rows_modified",
1165            "at",
1166            "between",
1167            "by",
1168            "case",
1169            "cast",
1170            "collate",
1171            "contains",
1172            "create",
1173            "cross",
1174            "cube",
1175            "current",
1176            "default",
1177            "define",
1178            "desc",
1179            "distinct",
1180            "else",
1181            "end",
1182            "enum",
1183            "escape",
1184            "except",
1185            "exclude",
1186            "exists",
1187            "extract",
1188            "false",
1189            "fetch",
1190            "following",
1191            "for",
1192            "from",
1193            "full",
1194            "group",
1195            "grouping",
1196            "groups",
1197            "hash",
1198            "having",
1199            "if",
1200            "ignore",
1201            "in",
1202            "inner",
1203            "intersect",
1204            "interval",
1205            "into",
1206            "is",
1207            "join",
1208            "lateral",
1209            "left",
1210            "like",
1211            "limit",
1212            "lookup",
1213            "merge",
1214            "natural",
1215            "new",
1216            "no",
1217            "not",
1218            "null",
1219            "nulls",
1220            "of",
1221            "on",
1222            "or",
1223            "order",
1224            "outer",
1225            "over",
1226            "partition",
1227            "preceding",
1228            "proto",
1229            "qualify",
1230            "range",
1231            "recursive",
1232            "respect",
1233            "right",
1234            "rollup",
1235            "rows",
1236            "select",
1237            "set",
1238            "some",
1239            "struct",
1240            "tablesample",
1241            "then",
1242            "to",
1243            "treat",
1244            "true",
1245            "unbounded",
1246            "union",
1247            "unnest",
1248            "using",
1249            "when",
1250            "where",
1251            "window",
1252            "with",
1253            "within",
1254        }
1255
1256        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1257            unit = expression.unit
1258            unit_sql = unit.name if unit.is_string else self.sql(unit)
1259            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
1260
1261        def mod_sql(self, expression: exp.Mod) -> str:
1262            this = expression.this
1263            expr = expression.expression
1264            return self.func(
1265                "MOD",
1266                this.unnest() if isinstance(this, exp.Paren) else this,
1267                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1268            )
1269
1270        def column_parts(self, expression: exp.Column) -> str:
1271            if expression.meta.get("quoted_column"):
1272                # If a column reference is of the form `dataset.table`.name, we need
1273                # to preserve the quoted table path, otherwise the reference breaks
1274                table_parts = ".".join(p.name for p in expression.parts[:-1])
1275                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1276                return f"{table_path}.{self.sql(expression, 'this')}"
1277
1278            return super().column_parts(expression)
1279
1280        def table_parts(self, expression: exp.Table) -> str:
1281            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1282            # we need to make sure the correct quoting is used in each case.
1283            #
1284            # For example, if there is a CTE x that clashes with a schema name, then the former will
1285            # return the table y in that schema, whereas the latter will return the CTE's y column:
1286            #
1287            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1288            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1289            if expression.meta.get("quoted_table"):
1290                table_parts = ".".join(p.name for p in expression.parts)
1291                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1292
1293            return super().table_parts(expression)
1294
1295        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1296            this = expression.this
1297            if isinstance(this, exp.TsOrDsToDatetime):
1298                func_name = "FORMAT_DATETIME"
1299            elif isinstance(this, exp.TsOrDsToTimestamp):
1300                func_name = "FORMAT_TIMESTAMP"
1301            else:
1302                func_name = "FORMAT_DATE"
1303
1304            time_expr = (
1305                this
1306                if isinstance(this, (exp.TsOrDsToDatetime, exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
1307                else expression
1308            )
1309            return self.func(
1310                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1311            )
1312
1313        def eq_sql(self, expression: exp.EQ) -> str:
1314            # Operands of = cannot be NULL in BigQuery
1315            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1316                if not isinstance(expression.parent, exp.Update):
1317                    return "NULL"
1318
1319            return self.binary(expression, "=")
1320
1321        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1322            parent = expression.parent
1323
1324            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1325            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1326            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1327                return self.func(
1328                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1329                )
1330
1331            return super().attimezone_sql(expression)
1332
1333        def trycast_sql(self, expression: exp.TryCast) -> str:
1334            return self.cast_sql(expression, safe_prefix="SAFE_")
1335
1336        def bracket_sql(self, expression: exp.Bracket) -> str:
1337            this = expression.this
1338            expressions = expression.expressions
1339
1340            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
1341                arg = expressions[0]
1342                if arg.type is None:
1343                    from sqlglot.optimizer.annotate_types import annotate_types
1344
1345                    arg = annotate_types(arg, dialect=self.dialect)
1346
1347                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
1348                    # BQ doesn't support bracket syntax with string values for structs
1349                    return f"{self.sql(this)}.{arg.name}"
1350
1351            expressions_sql = self.expressions(expression, flat=True)
1352            offset = expression.args.get("offset")
1353
1354            if offset == 0:
1355                expressions_sql = f"OFFSET({expressions_sql})"
1356            elif offset == 1:
1357                expressions_sql = f"ORDINAL({expressions_sql})"
1358            elif offset is not None:
1359                self.unsupported(f"Unsupported array offset: {offset}")
1360
1361            if expression.args.get("safe"):
1362                expressions_sql = f"SAFE_{expressions_sql}"
1363
1364            return f"{self.sql(this)}[{expressions_sql}]"
1365
1366        def in_unnest_op(self, expression: exp.Unnest) -> str:
1367            return self.sql(expression)
1368
1369        def version_sql(self, expression: exp.Version) -> str:
1370            if expression.name == "TIMESTAMP":
1371                expression.set("this", "SYSTEM_TIME")
1372            return super().version_sql(expression)
1373
1374        def contains_sql(self, expression: exp.Contains) -> str:
1375            this = expression.this
1376            expr = expression.expression
1377
1378            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1379                this = this.this
1380                expr = expr.this
1381
1382            return self.func("CONTAINS_SUBSTR", this, expr)
1383
1384        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1385            this = expression.this
1386
1387            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
1388            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
1389            # because they aren't literals and so the above syntax is invalid BigQuery.
1390            if isinstance(this, exp.Array):
1391                elem = seq_get(this.expressions, 0)
1392                if not (elem and elem.find(exp.Query)):
1393                    return f"{self.sql(expression, 'to')}{self.sql(this)}"
1394
1395            return super().cast_sql(expression, safe_prefix=safe_prefix)
1396
1397        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1398            variables = self.expressions(expression, "this")
1399            default = self.sql(expression, "default")
1400            default = f" DEFAULT {default}" if default else ""
1401            kind = self.sql(expression, "kind")
1402            kind = f" {kind}" if kind else ""
1403
1404            return f"{variables}{kind}{default}"
WEEK_OFFSET = -1

First day of the week in DATE_TRUNC(week). Defaults to 0 (Monday). -1 would be Sunday.

UNNEST_COLUMN_ONLY = True

Whether UNNEST table aliases are treated as column aliases.

SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

LOG_BASE_FIRST: Optional[bool] = False

Whether the base comes first in the LOG function. Possible values: True, False, None (two arguments are not supported by LOG)

HEX_LOWERCASE = True

Whether the HEX function returns a lowercase hexadecimal string.

FORCE_EARLY_ALIAS_REF_EXPANSION = True

Whether alias reference expansion (_expand_alias_refs()) should run before column qualification (_qualify_columns()).

For example:

WITH data AS ( SELECT 1 AS id, 2 AS my_id ) SELECT id AS my_id FROM data WHERE my_id = 1 GROUP BY my_id HAVING my_id = 1

In most dialects, "my_id" would refer to "data.my_id" across the query, except: - BigQuery, which will forward the alias to GROUP BY + HAVING clauses, i.e. it resolves to "WHERE my_id = 1 GROUP BY id HAVING id = 1" - ClickHouse, which will forward the alias across the query, i.e. it resolves to "WHERE id = 1 GROUP BY id HAVING id = 1"

PRESERVE_ORIGINAL_NAMES: bool = True

Whether the name of the function should be preserved inside the node's metadata, can be useful for roundtripping deprecated vs new functions that share an AST node e.g JSON_VALUE vs JSON_EXTRACT_SCALAR in BigQuery

HEX_STRING_IS_INTEGER_TYPE: bool = True

Whether hex strings such as x'CC' evaluate to integer or binary/blob type

NORMALIZATION_STRATEGY = <NormalizationStrategy.CASE_INSENSITIVE: 'CASE_INSENSITIVE'>

Specifies the strategy according to which identifiers should be normalized.

NORMALIZE_FUNCTIONS: bool | str = False

Determines how function names are going to be normalized.

Possible values:

"upper" or True: Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.

TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y', '%E6S': '%S.%f', '%e': '%-d'}

Associates this dialect's time formats with their equivalent Python strftime formats.

FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}

Helper which is used for parsing the special syntax CAST(x AS DATE FORMAT 'yyyy'). If empty, the corresponding trie will be constructed off of TIME_MAPPING.

PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_TABLE_SUFFIX', '_PARTITIONTIME', '_FILE_NAME'}

Columns that are auto-generated by the engine corresponding to this dialect. For example, such columns may be excluded from SELECT * queries.

SET_OP_DISTINCT_BY_DEFAULT: Dict[Type[sqlglot.expressions.Expression], Optional[bool]] = {<class 'sqlglot.expressions.Except'>: None, <class 'sqlglot.expressions.Intersect'>: None, <class 'sqlglot.expressions.Union'>: None}

Whether a set operation uses DISTINCT by default. This is None when either DISTINCT or ALL must be explicitly specified.

TYPE_TO_EXPRESSIONS: Dict[sqlglot.expressions.DataType.Type, Set[Type[sqlglot.expressions.Expression]]] = {<Type.BIGINT: 'BIGINT'>: {<class 'sqlglot.expressions.Int64'>, <class 'sqlglot.expressions.ArraySize'>, <class 'sqlglot.expressions.ApproxDistinct'>, <class 'sqlglot.expressions.CountIf'>, <class 'sqlglot.expressions.UnixSeconds'>, <class 'sqlglot.expressions.UnixDate'>, <class 'sqlglot.expressions.Length'>}, <Type.BINARY: 'BINARY'>: {<class 'sqlglot.expressions.FromBase64'>}, <Type.BOOLEAN: 'BOOLEAN'>: {<class 'sqlglot.expressions.EndsWith'>, <class 'sqlglot.expressions.StartsWith'>, <class 'sqlglot.expressions.LogicalAnd'>, <class 'sqlglot.expressions.LogicalOr'>, <class 'sqlglot.expressions.RegexpLike'>, <class 'sqlglot.expressions.Boolean'>, <class 'sqlglot.expressions.In'>, <class 'sqlglot.expressions.Between'>}, <Type.DATE: 'DATE'>: {<class 'sqlglot.expressions.DateStrToDate'>, <class 'sqlglot.expressions.TimeStrToDate'>, <class 'sqlglot.expressions.DiToDate'>, <class 'sqlglot.expressions.TsOrDsToDate'>, <class 'sqlglot.expressions.DateFromParts'>, <class 'sqlglot.expressions.StrToDate'>, <class 'sqlglot.expressions.CurrentDate'>, <class 'sqlglot.expressions.Date'>}, <Type.DATETIME: 'DATETIME'>: {<class 'sqlglot.expressions.CurrentDatetime'>, <class 'sqlglot.expressions.DatetimeAdd'>, <class 'sqlglot.expressions.Datetime'>, <class 'sqlglot.expressions.DatetimeSub'>}, <Type.DOUBLE: 'DOUBLE'>: {<class 'sqlglot.expressions.StddevSamp'>, <class 'sqlglot.expressions.ApproxQuantile'>, <class 'sqlglot.expressions.StddevPop'>, <class 'sqlglot.expressions.ToDouble'>, <class 'sqlglot.expressions.Exp'>, <class 'sqlglot.expressions.Quantile'>, <class 'sqlglot.expressions.Stddev'>, <class 'sqlglot.expressions.Sqrt'>, <class 'sqlglot.expressions.Log'>, <class 'sqlglot.expressions.SafeDivide'>, <class 'sqlglot.expressions.Ln'>, <class 'sqlglot.expressions.Avg'>, <class 'sqlglot.expressions.Pow'>, <class 'sqlglot.expressions.VariancePop'>, <class 
'sqlglot.expressions.Round'>, <class 'sqlglot.expressions.Variance'>}, <Type.INT: 'INT'>: {<class 'sqlglot.expressions.DatetimeDiff'>, <class 'sqlglot.expressions.TimestampDiff'>, <class 'sqlglot.expressions.DateToDi'>, <class 'sqlglot.expressions.Sign'>, <class 'sqlglot.expressions.Ceil'>, <class 'sqlglot.expressions.TsOrDiToDi'>, <class 'sqlglot.expressions.Unicode'>, <class 'sqlglot.expressions.DateDiff'>, <class 'sqlglot.expressions.Levenshtein'>, <class 'sqlglot.expressions.Ascii'>, <class 'sqlglot.expressions.TimeDiff'>, <class 'sqlglot.expressions.StrPosition'>}, <Type.INTERVAL: 'INTERVAL'>: {<class 'sqlglot.expressions.Interval'>, <class 'sqlglot.expressions.MakeInterval'>}, <Type.JSON: 'JSON'>: {<class 'sqlglot.expressions.ParseJSON'>}, <Type.TIME: 'TIME'>: {<class 'sqlglot.expressions.Time'>, <class 'sqlglot.expressions.CurrentTime'>, <class 'sqlglot.expressions.TimeAdd'>, <class 'sqlglot.expressions.TimeSub'>}, <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: {<class 'sqlglot.expressions.TimestampSub'>, <class 'sqlglot.expressions.TimeStrToTime'>, <class 'sqlglot.expressions.TimestampAdd'>, <class 'sqlglot.expressions.CurrentTimestamp'>, <class 'sqlglot.expressions.UnixToTime'>, <class 'sqlglot.expressions.StrToTime'>}, <Type.TINYINT: 'TINYINT'>: {<class 'sqlglot.expressions.Year'>, <class 'sqlglot.expressions.Month'>, <class 'sqlglot.expressions.Week'>, <class 'sqlglot.expressions.Day'>, <class 'sqlglot.expressions.Quarter'>}, <Type.VARCHAR: 'VARCHAR'>: {<class 'sqlglot.expressions.Trim'>, <class 'sqlglot.expressions.Lower'>, <class 'sqlglot.expressions.Upper'>, <class 'sqlglot.expressions.UnixToTimeStr'>, <class 'sqlglot.expressions.TimeToStr'>, <class 'sqlglot.expressions.ArrayConcat'>, <class 'sqlglot.expressions.DPipe'>, <class 'sqlglot.expressions.GroupConcat'>, <class 'sqlglot.expressions.TsOrDsToDateStr'>, <class 'sqlglot.expressions.Concat'>, <class 'sqlglot.expressions.ToBase64'>, <class 'sqlglot.expressions.DateToDateStr'>, <class 
'sqlglot.expressions.Initcap'>, <class 'sqlglot.expressions.String'>, <class 'sqlglot.expressions.UnixToStr'>, <class 'sqlglot.expressions.TimeToTimeStr'>, <class 'sqlglot.expressions.ArrayToString'>, <class 'sqlglot.expressions.Substring'>, <class 'sqlglot.expressions.ConcatWs'>, <class 'sqlglot.expressions.Chr'>}}
ANNOTATORS: Dict[Type[~E], Callable[[sqlglot.optimizer.annotate_types.TypeAnnotator, ~E], ~E]] = {<class 'sqlglot.expressions.Alias'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseNot'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Neg'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Not'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Paren'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.PivotAlias'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Unary'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Add'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.And'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Binary'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseAnd'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseLeftShift'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseOr'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseRightShift'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Collate'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Connector'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Corr'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CovarPop'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.CovarSamp'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.DPipe'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Distance'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Div'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Dot'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.EQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Escape'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.GT'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.GTE'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Glob'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.IntDiv'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Is'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONArrayContains'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONBContains'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONBExtract'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONBExtractScalar'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Kwarg'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.LT'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.LTE'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Like'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Mod'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Mul'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.NEQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.NullSafeEQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 
'sqlglot.expressions.NullSafeNEQ'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Or'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Overlaps'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Pow'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.PropertyEQ'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.RegexpILike'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.RegexpLike'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.SimilarTo'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Slice'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Sub'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function Dialect.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CountIf'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixSeconds'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Length'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.FromBase64'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.EndsWith'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Boolean'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.In'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Between'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeStrToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DiToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentDate'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Date'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentDatetime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DatetimeAdd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Datetime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DatetimeSub'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StddevSamp'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ApproxQuantile'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StddevPop'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function annotate_with_type_lambda.<locals>.<lambda>>, 
<class 'sqlglot.expressions.Exp'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Quantile'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Stddev'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Sqrt'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Log'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.SafeDivide'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Ln'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Avg'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.VariancePop'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Round'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Variance'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DatetimeDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateToDi'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Sign'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.TsOrDiToDi'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Unicode'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.DateDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Ascii'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimeDiff'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function 
annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Interval'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ParseJSON'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Time'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeSub'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentTimestampLTZ'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampSub'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeStrToTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampAdd'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.CurrentTimestamp'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Year'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Month'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Week'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Day'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Quarter'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.<dictcomp>.<lambda>>, <class 
'sqlglot.expressions.Lower'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Upper'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.UnixToTimeStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeToStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayConcat'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDateStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Concat'>: <function _annotate_concat>, <class 'sqlglot.expressions.ToBase64'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.DateToDateStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Initcap'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.String'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeToTimeStr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayToString'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Substring'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.ConcatWs'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Chr'>: <function annotate_with_type_lambda.<locals>.<lambda>>, <class 'sqlglot.expressions.Abs'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Anonymous'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Array'>: <function _annotate_array>, <class 'sqlglot.expressions.AnyValue'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayAgg'>: <function Dialect.<lambda>>, <class 
'sqlglot.expressions.ArrayConcatAgg'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayFirst'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayLast'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArrayReverse'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ArraySlice'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Bracket'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Case'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Coalesce'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Count'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DataType'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DateSub'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.DateTrunc'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Distinct'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Filter'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.GenerateDateArray'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.GenerateTimestampArray'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Greatest'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.If'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Least'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Literal'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.LastValue'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Max'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Min'>: <function Dialect.<lambda>>, 
<class 'sqlglot.expressions.Null'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Nullif'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Struct'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Sum'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.SortArray'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.TryCast'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Unnest'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Window'>: <function Dialect.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Left'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Right'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Pad'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.Repeat'>: <function BigQuery.<dictcomp>.<lambda>>, <class 'sqlglot.expressions.BitwiseAndAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseOrAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseXorAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.BitwiseCountAgg'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONArray'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.JSONType'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.Lag'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.SHA2'>: <function 
BigQuery.<lambda>>, <class 'sqlglot.expressions.Split'>: <function BigQuery.<lambda>>, <class 'sqlglot.expressions.TimestampFromParts'>: <function BigQuery.<lambda>>}
def normalize_identifier(self, expression: ~E) -> ~E:
508    def normalize_identifier(self, expression: E) -> E:
509        if (
510            isinstance(expression, exp.Identifier)
511            and self.normalization_strategy is NormalizationStrategy.CASE_INSENSITIVE
512        ):
513            parent = expression.parent
514            while isinstance(parent, exp.Dot):
515                parent = parent.parent
516
517            # In BigQuery, CTEs are case-insensitive, but UDF and table names are case-sensitive
518            # by default. The following check uses a heuristic to detect tables based on whether
519            # they are qualified. This should generally be correct, because tables in BigQuery
520            # must be qualified with at least a dataset, unless @@dataset_id is set.
521            case_sensitive = (
522                isinstance(parent, exp.UserDefinedFunction)
523                or (
524                    isinstance(parent, exp.Table)
525                    and parent.db
526                    and (parent.meta.get("quoted_table") or not parent.meta.get("maybe_column"))
527                )
528                or expression.meta.get("is_table")
529            )
530            if not case_sensitive:
531                expression.set("this", expression.this.lower())
532
533            return t.cast(E, expression)
534
535        return super().normalize_identifier(expression)

Transforms an identifier in a way that resembles how it'd be resolved by this dialect.

For example, an identifier like FoO would be resolved as foo in Postgres, because it lowercases all unquoted identifiers. On the other hand, Snowflake uppercases them, so it would resolve it as FOO. If it was quoted, it'd need to be treated as case-sensitive, and so any normalization would be prohibited in order to avoid "breaking" the identifier.

There are also dialects like Spark, which are case-insensitive even when quotes are present, and dialects like MySQL, whose resolution rules match those employed by the underlying operating system, for example they may always be case-sensitive in Linux.

Finally, the normalization behavior of some engines can even be controlled through flags, like in Redshift's case, where users can explicitly set enable_case_sensitive_identifier.

SQLGlot aims to understand and handle all of these different behaviors gracefully, so that it can analyze queries in the optimizer and successfully capture their semantics.

SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character string \n) to its unescaped version (the literal newline character).

tokenizer_class = <class 'BigQuery.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}, 'E': {'6': {'S': {0: True}}}, 'e': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D', '%S.%f': '%E6S', '%-d': '%e'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}, 'S': {'.': {'%': {'f': {0: True}}}}, '-': {'d': {0: True}}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {'%d': 'DD', '%m': 'MM', '%b': 'MON', '%B': 'MONTH', '%Y': 'YYYY', '%y': 'YY', '%I': 'HH12', '%H': 'HH24', '%M': 'MI', '%S': 'SS', '%f': 'SSSSS', '%z': 'TZH'}
INVERSE_FORMAT_TRIE: Dict = {'%': {'d': {0: True}, 'm': {0: True}, 'b': {0: True}, 'B': {0: True}, 'Y': {0: True}, 'y': {0: True}, 'I': {0: True}, 'H': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}, 'z': {0: True}}}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
537    class Tokenizer(tokens.Tokenizer):
538        QUOTES = ["'", '"', '"""', "'''"]
539        COMMENTS = ["--", "#", ("/*", "*/")]
540        IDENTIFIERS = ["`"]
541        STRING_ESCAPES = ["\\"]
542
543        HEX_STRINGS = [("0x", ""), ("0X", "")]
544
545        BYTE_STRINGS = [
546            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
547        ]
548
549        RAW_STRINGS = [
550            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
551        ]
552
553        NESTED_COMMENTS = False
554
555        KEYWORDS = {
556            **tokens.Tokenizer.KEYWORDS,
557            "ANY TYPE": TokenType.VARIANT,
558            "BEGIN": TokenType.COMMAND,
559            "BEGIN TRANSACTION": TokenType.BEGIN,
560            "BYTEINT": TokenType.INT,
561            "BYTES": TokenType.BINARY,
562            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
563            "DATETIME": TokenType.TIMESTAMP,
564            "DECLARE": TokenType.DECLARE,
565            "ELSEIF": TokenType.COMMAND,
566            "EXCEPTION": TokenType.COMMAND,
567            "EXPORT": TokenType.EXPORT,
568            "FLOAT64": TokenType.DOUBLE,
569            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
570            "MODEL": TokenType.MODEL,
571            "NOT DETERMINISTIC": TokenType.VOLATILE,
572            "RECORD": TokenType.STRUCT,
573            "TIMESTAMP": TokenType.TIMESTAMPTZ,
574        }
575        KEYWORDS.pop("DIV")
576        KEYWORDS.pop("VALUES")
577        KEYWORDS.pop("/*+")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '|>': <TokenType.PIPE_GT: 'PIPE_GT'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 
'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_SCHEMA': <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': 
<TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NAMESPACE': <TokenType.NAMESPACE: 'NAMESPACE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': 
<TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': 
<TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT256': <TokenType.INT256: 'INT256'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'UINT128': <TokenType.UINT128: 'UINT128'>, 'UINT256': <TokenType.UINT256: 'UINT256'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': 
<TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 
'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.ANALYZE: 'ANALYZE'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'DECLARE': <TokenType.DECLARE: 'DECLARE'>, 'ELSEIF': <TokenType.COMMAND: 'COMMAND'>, 'EXCEPTION': <TokenType.COMMAND: 'COMMAND'>, 'EXPORT': <TokenType.EXPORT: 'EXPORT'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'MODEL': <TokenType.MODEL: 'MODEL'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>}
class BigQuery.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Parser for Google BigQuery's Standard SQL dialect."""

        PREFIXED_PIVOT_COLUMNS = True
        LOG_DEFAULTS_TO_LN = True
        SUPPORTS_IMPLICIT_UNNEST = True
        JOINS_HAVE_EQUAL_PRECEDENCE = True

        # BigQuery does not allow ASC/DESC to be used as an identifier
        ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
        ALIAS_TOKENS = parser.Parser.ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}
        COMMENT_TABLE_ALIAS_TOKENS = parser.Parser.COMMENT_TABLE_ALIAS_TOKENS - {
            TokenType.ASC,
            TokenType.DESC,
        }
        UPDATE_ALIAS_TOKENS = parser.Parser.UPDATE_ALIAS_TOKENS - {TokenType.ASC, TokenType.DESC}

        # Maps BigQuery function names to builders that produce sqlglot AST nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "CONTAINS_SUBSTR": _build_contains_substring,
            "DATE": _build_date,
            "DATE_ADD": build_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": build_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=seq_get(args, 1),
                this=seq_get(args, 0),
                zone=seq_get(args, 2),
            ),
            "DATETIME": _build_datetime,
            "DATETIME_ADD": build_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": build_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "EDIT_DISTANCE": _build_levenshtein,
            "FORMAT_DATE": _build_format_time(exp.TsOrDsToDate),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "JSON_EXTRACT_SCALAR": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_EXTRACT_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_QUERY": parser.build_extract_json_with_path(exp.JSONExtract),
            "JSON_QUERY_ARRAY": _build_extract_json_with_default_path(exp.JSONExtractArray),
            "JSON_VALUE": _build_extract_json_with_default_path(exp.JSONExtractScalar),
            "JSON_VALUE_ARRAY": _build_extract_json_with_default_path(exp.JSONValueArray),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _build_to_hex,
            # PARSE_DATE(fmt, value) stores args as (value, fmt) in StrToDate.
            "PARSE_DATE": lambda args: build_formatted_time(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _build_parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(
                exp.RegexpExtractAll, default_group=exp.Literal.number(0)
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "STRPOS": exp.StrPosition.from_arg_list,
            "TIME": _build_time,
            "TIME_ADD": build_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": build_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP": _build_timestamp,
            "TIMESTAMP_ADD": build_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": build_date_delta_with_interval(exp.TimestampSub),
            "TIMESTAMP_MICROS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MICROS
            ),
            "TIMESTAMP_MILLIS": lambda args: exp.UnixToTime(
                this=seq_get(args, 0), scale=exp.UnixToTime.MILLIS
            ),
            "TIMESTAMP_SECONDS": lambda args: exp.UnixToTime(this=seq_get(args, 0)),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
            "FORMAT_DATETIME": _build_format_time(exp.TsOrDsToDatetime),
            "FORMAT_TIMESTAMP": _build_format_time(exp.TsOrDsToTimestamp),
        }

        # Functions that need bespoke parsing logic beyond a simple builder.
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
            "JSON_ARRAY": lambda self: self.expression(
                exp.JSONArray, expressions=self._parse_csv(self._parse_bitwise)
            ),
            "MAKE_INTERVAL": lambda self: self._parse_make_interval(),
            "FEATURES_AT_TIME": lambda self: self._parse_features_at_time(),
        }
        # TRIM is parsed as a regular function in BigQuery, not with the
        # TRIM(<chars> FROM <expr>) special form.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        # OVERLAPS is a valid identifier in BigQuery, not a range operator.
        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        # Tokens that may legally follow a dash-separated table-name part.
        DASHED_TABLE_PART_FOLLOW_TOKENS = {TokenType.DOT, TokenType.L_PAREN, TokenType.R_PAREN}

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.ELSE: lambda self: self._parse_as_command(self._prev),
            TokenType.END: lambda self: self._parse_as_command(self._prev),
            TokenType.FOR: lambda self: self._parse_for_in(),
            TokenType.EXPORT: lambda self: self._parse_export_data(),
            TokenType.DECLARE: lambda self: self._parse_declare(),
        }

        # Maps BigQuery array-subscript wrappers to (base offset, SAFE_ flag).
        BRACKET_OFFSETS = {
            "OFFSET": (0, False),
            "ORDINAL": (1, False),
            "SAFE_OFFSET": (0, True),
            "SAFE_ORDINAL": (1, True),
        }
714
715        def _parse_for_in(self) -> exp.ForIn:
716            this = self._parse_range()
717            self._match_text_seq("DO")
718            return self.expression(exp.ForIn, this=this, expression=self._parse_statement())
719
        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse a single table-name part, supporting BigQuery's dashed names
            (e.g. `my-project`) and parts that begin with a digit.

            See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            """
            # A part may be a regular identifier or, unusually, start as a number
            # (e.g. the `1` in `proj-1.db.tbl`).
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Greedily consume dash-connected tokens until a token that can
                # legally follow a dashed part (dot / parenthesis) is reached.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    start = self._curr
                    while self._is_connected() and not self._match_set(
                        self.DASHED_TABLE_PART_FOLLOW_TOKENS, advance=False
                    ):
                        self._advance()

                    # No progress was made -- bail out to avoid looping forever.
                    if start == self._curr:
                        break

                    table_name += self._find_sql(start, self._prev)

                this = exp.Identifier(
                    this=table_name, quoted=this.args.get("quoted")
                ).update_positions(this)
            elif isinstance(this, exp.Literal):
                # A numeric literal immediately followed (no whitespace) by more
                # text is really an identifier like `1st_part`; merge and quote it.
                table_name = this.name

                if self._is_connected() and self._parse_var(any_token=True):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True).update_positions(this)

            return this
750
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a (possibly dashed / dotted / wildcard) BigQuery table reference,
            normalizing it into a canonical 3-part `exp.Table`.

            Handles three BigQuery-specific quirks: numeric project/dataset segments
            that tokenize as floats, quoted identifiers that contain dots (e.g.
            `` `proj.db.tbl` ``), and `INFORMATION_SCHEMA` views.
            """
            # Wildcard tables (e.g. `tbl_*`) are always permitted in BigQuery.
            table = super()._parse_table_parts(
                schema=schema, is_db_reference=is_db_reference, wildcard=True
            )

            # proj-1.db.tbl -- `1.` is tokenized as a float so we need to unravel it here
            if not table.catalog:
                if table.db:
                    previous_db = table.args["db"]
                    parts = table.db.split(".")
                    if len(parts) == 2 and not table.args["db"].quoted:
                        table.set(
                            "catalog", exp.Identifier(this=parts[0]).update_positions(previous_db)
                        )
                        table.set("db", exp.Identifier(this=parts[1]).update_positions(previous_db))
                else:
                    previous_this = table.this
                    parts = table.name.split(".")
                    if len(parts) == 2 and not table.this.quoted:
                        table.set(
                            "db", exp.Identifier(this=parts[0]).update_positions(previous_this)
                        )
                        table.set(
                            "this", exp.Identifier(this=parts[1]).update_positions(previous_this)
                        )

            # A quoted name like `proj.db.tbl` arrives as one identifier with dots;
            # re-split it into catalog/db/this (extra parts become a Dot chain).
            if isinstance(table.this, exp.Identifier) and any("." in p.name for p in table.parts):
                alias = table.this
                catalog, db, this, *rest = (
                    exp.to_identifier(p, quoted=True)
                    for p in split_num_words(".".join(p.name for p in table.parts), ".", 3)
                )

                for part in (catalog, db, this):
                    if part:
                        part.update_positions(table.this)

                if rest and this:
                    this = exp.Dot.build([this, *rest])  # type: ignore

                table = exp.Table(
                    this=this, db=db, catalog=catalog, pivots=table.args.get("pivots")
                )
                table.meta["quoted_table"] = True
            else:
                alias = None

            # The `INFORMATION_SCHEMA` views in BigQuery need to be qualified by a region or
            # dataset, so if the project identifier is omitted we need to fix the ast so that
            # the `INFORMATION_SCHEMA.X` bit is represented as a single (quoted) Identifier.
            # Otherwise, we wouldn't correctly qualify a `Table` node that references these
            # views, because it would seem like the "catalog" part is set, when it'd actually
            # be the region/dataset. Merging the two identifiers into a single one is done to
            # avoid producing a 4-part Table reference, which would cause issues in the schema
            # module, when there are 3-part table names mixed with information schema views.
            #
            # See: https://cloud.google.com/bigquery/docs/information-schema-intro#syntax
            table_parts = table.parts
            if len(table_parts) > 1 and table_parts[-2].name.upper() == "INFORMATION_SCHEMA":
                # We need to alias the table here to avoid breaking existing qualified columns.
                # This is expected to be safe, because if there's an actual alias coming up in
                # the token stream, it will overwrite this one. If there isn't one, we are only
                # exposing the name that can be used to reference the view explicitly (a no-op).
                exp.alias_(
                    table,
                    t.cast(exp.Identifier, alias or table_parts[-1]),
                    table=True,
                    copy=False,
                )

                # Fuse `INFORMATION_SCHEMA.<view>` into one quoted identifier,
                # spanning the source positions of both original parts.
                info_schema_view = f"{table_parts[-2].name}.{table_parts[-1].name}"
                new_this = exp.Identifier(this=info_schema_view, quoted=True).update_positions(
                    line=table_parts[-2].meta.get("line"),
                    col=table_parts[-1].meta.get("col"),
                    start=table_parts[-2].meta.get("start"),
                    end=table_parts[-1].meta.get("end"),
                )
                table.set("this", new_this)
                table.set("db", seq_get(table_parts, -3))
                table.set("catalog", seq_get(table_parts, -4))

            return table
835
836        def _parse_column(self) -> t.Optional[exp.Expression]:
837            column = super()._parse_column()
838            if isinstance(column, exp.Column):
839                parts = column.parts
840                if any("." in p.name for p in parts):
841                    catalog, db, table, this, *rest = (
842                        exp.to_identifier(p, quoted=True)
843                        for p in split_num_words(".".join(p.name for p in parts), ".", 4)
844                    )
845
846                    if rest and this:
847                        this = exp.Dot.build([this, *rest])  # type: ignore
848
849                    column = exp.Column(this=this, table=table, db=db, catalog=catalog)
850                    column.meta["quoted_column"] = True
851
852            return column
853
        @t.overload
        def _parse_json_object(self, agg: Lit[False]) -> exp.JSONObject: ...

        @t.overload
        def _parse_json_object(self, agg: Lit[True]) -> exp.JSONObjectAgg: ...

        def _parse_json_object(self, agg=False):
            """Parse JSON_OBJECT, normalizing BigQuery's array-pair signature.

            NOTE(review): the ``agg`` flag is not forwarded to ``super()`` here --
            confirm this is intentional for the BigQuery dialect.
            """
            json_object = super()._parse_json_object()
            # The first "key-value pair" may actually be a pair of arrays.
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                # Zip the key array with the value array into individual pairs.
                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object
880
881        def _parse_bracket(
882            self, this: t.Optional[exp.Expression] = None
883        ) -> t.Optional[exp.Expression]:
884            bracket = super()._parse_bracket(this)
885
886            if this is bracket:
887                return bracket
888
889            if isinstance(bracket, exp.Bracket):
890                for expression in bracket.expressions:
891                    name = expression.name.upper()
892
893                    if name not in self.BRACKET_OFFSETS:
894                        break
895
896                    offset, safe = self.BRACKET_OFFSETS[name]
897                    bracket.set("offset", offset)
898                    bracket.set("safe", safe)
899                    expression.replace(expression.expressions[0])
900
901            return bracket
902
        def _parse_unnest(self, with_alias: bool = True) -> t.Optional[exp.Unnest]:
            """Parse UNNEST and mark it when its argument is an array of structs,
            since BigQuery explodes the struct fields in that case."""
            unnest = super()._parse_unnest(with_alias=with_alias)

            if not unnest:
                return None

            unnest_expr = seq_get(unnest.expressions, 0)
            if unnest_expr:
                # Imported lazily to avoid a circular import at module load time.
                from sqlglot.optimizer.annotate_types import annotate_types

                unnest_expr = annotate_types(unnest_expr, dialect=self.dialect)

                # Unnesting a nested array (i.e array of structs) explodes the top-level struct fields,
                # in contrast to other dialects such as DuckDB which flattens only the array by default
                if unnest_expr.is_type(exp.DataType.Type.ARRAY) and any(
                    array_elem.is_type(exp.DataType.Type.STRUCT)
                    for array_elem in unnest_expr._type.expressions
                ):
                    unnest.set("explode_array", True)

            return unnest
924
925        def _parse_make_interval(self) -> exp.MakeInterval:
926            expr = exp.MakeInterval()
927
928            for arg_key in expr.arg_types:
929                value = self._parse_lambda()
930
931                if not value:
932                    break
933
934                # Non-named arguments are filled sequentially, (optionally) followed by named arguments
935                # that can appear in any order e.g MAKE_INTERVAL(1, minute => 5, day => 2)
936                if isinstance(value, exp.Kwarg):
937                    arg_key = value.this.name
938
939                expr.set(arg_key, value)
940
941                self._match(TokenType.COMMA)
942
943            return expr
944
945        def _parse_features_at_time(self) -> exp.FeaturesAtTime:
946            expr = self.expression(
947                exp.FeaturesAtTime,
948                this=(self._match(TokenType.TABLE) and self._parse_table())
949                or self._parse_select(nested=True),
950            )
951
952            while self._match(TokenType.COMMA):
953                arg = self._parse_lambda()
954
955                # Get the LHS of the Kwarg and set the arg to that value, e.g
956                # "num_rows => 1" sets the expr's `num_rows` arg
957                if arg:
958                    expr.set(arg.this.name, arg)
959
960            return expr
961
962        def _parse_export_data(self) -> exp.Export:
963            self._match_text_seq("DATA")
964
965            return self.expression(
966                exp.Export,
967                connection=self._match_text_seq("WITH", "CONNECTION") and self._parse_table_parts(),
968                options=self._parse_properties(),
969                this=self._match_text_seq("AS") and self._parse_select(),
970            )

The Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_DEFAULTS_TO_LN = True
SUPPORTS_IMPLICIT_UNNEST = True
JOINS_HAVE_EQUAL_PRECEDENCE = True
ID_VAR_TOKENS = {<TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.PUT: 'PUT'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.ANTI: 'ANTI'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.COPY: 'COPY'>, <TokenType.UINT: 'UINT'>, <TokenType.TOP: 'TOP'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.TRUE: 'TRUE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NAME: 'NAME'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.SOME: 'SOME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.VAR: 'VAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.USE: 'USE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, 
<TokenType.POINT: 'POINT'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIT: 'BIT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.END: 'END'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ANY: 'ANY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TAG: 'TAG'>, <TokenType.INT128: 'INT128'>, <TokenType.GET: 'GET'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.ASOF: 'ASOF'>, <TokenType.NULL: 'NULL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SHOW: 'SHOW'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.FULL: 'FULL'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.JSON: 'JSON'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOID: 'VOID'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, 
<TokenType.WINDOW: 'WINDOW'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.INT256: 'INT256'>, <TokenType.KILL: 'KILL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SINK: 'SINK'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RING: 'RING'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.IS: 'IS'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.XML: 'XML'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATE: 'DATE'>, <TokenType.DETACH: 'DETACH'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.APPLY: 'APPLY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.LEFT: 'LEFT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ALL: 'ALL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SEMI: 'SEMI'>, 
<TokenType.CASE: 'CASE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SET: 'SET'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>}
ALIAS_TOKENS = {<TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.PUT: 'PUT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.ANTI: 'ANTI'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.COPY: 'COPY'>, <TokenType.UINT: 'UINT'>, <TokenType.TOP: 'TOP'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.TRUE: 'TRUE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NAME: 'NAME'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.SOME: 'SOME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.VAR: 'VAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.USE: 'USE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.POINT: 'POINT'>, <TokenType.NAMESPACE: 
'NAMESPACE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIT: 'BIT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.END: 'END'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ANY: 'ANY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TAG: 'TAG'>, <TokenType.INT128: 'INT128'>, <TokenType.GET: 'GET'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.ASOF: 'ASOF'>, <TokenType.NULL: 'NULL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SHOW: 'SHOW'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.FULL: 'FULL'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.JSON: 'JSON'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOID: 'VOID'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.NUMRANGE: 'NUMRANGE'>, 
<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.INT256: 'INT256'>, <TokenType.KILL: 'KILL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SINK: 'SINK'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RING: 'RING'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.IS: 'IS'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.XML: 'XML'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DETACH: 'DETACH'>, <TokenType.DATE: 'DATE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.APPLY: 'APPLY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.LEFT: 'LEFT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ALL: 'ALL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CASE: 'CASE'>, <TokenType.VARIANT: 'VARIANT'>, 
<TokenType.RIGHT: 'RIGHT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SET: 'SET'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>}
TABLE_ALIAS_TOKENS = {<TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.PUT: 'PUT'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.COPY: 'COPY'>, <TokenType.UINT: 'UINT'>, <TokenType.TOP: 'TOP'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.TRUE: 'TRUE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NAME: 'NAME'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.SOME: 'SOME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.VAR: 'VAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.USE: 'USE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, 
<TokenType.POINT: 'POINT'>, <TokenType.NAMESPACE: 'NAMESPACE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIT: 'BIT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.END: 'END'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ANY: 'ANY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TAG: 'TAG'>, <TokenType.INT128: 'INT128'>, <TokenType.GET: 'GET'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.NULL: 'NULL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SHOW: 'SHOW'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.JSON: 'JSON'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOID: 'VOID'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PSEUDO_TYPE: 
'PSEUDO_TYPE'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.INT256: 'INT256'>, <TokenType.KILL: 'KILL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SINK: 'SINK'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RING: 'RING'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.IS: 'IS'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.XML: 'XML'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DATE: 'DATE'>, <TokenType.DETACH: 'DETACH'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ALL: 'ALL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CASE: 'CASE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.FILTER: 'FILTER'>, 
<TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SET: 'SET'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>}
COMMENT_TABLE_ALIAS_TOKENS = {<TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.PUT: 'PUT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.COPY: 'COPY'>, <TokenType.UINT: 'UINT'>, <TokenType.TOP: 'TOP'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.TRUE: 'TRUE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NAME: 'NAME'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.SOME: 'SOME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.VAR: 'VAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.USE: 'USE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.POINT: 'POINT'>, <TokenType.NAMESPACE: 'NAMESPACE'>, 
<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIT: 'BIT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.END: 'END'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ANY: 'ANY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TAG: 'TAG'>, <TokenType.INT128: 'INT128'>, <TokenType.GET: 'GET'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.NULL: 'NULL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SHOW: 'SHOW'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.JSON: 'JSON'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOID: 'VOID'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.LINESTRING: 'LINESTRING'>, 
<TokenType.INT256: 'INT256'>, <TokenType.KILL: 'KILL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SINK: 'SINK'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RING: 'RING'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.XML: 'XML'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DETACH: 'DETACH'>, <TokenType.DATE: 'DATE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ALL: 'ALL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.CASE: 'CASE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.FOREIGN_KEY: 
'FOREIGN_KEY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SET: 'SET'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>}
UPDATE_ALIAS_TOKENS = {<TokenType.GEOGRAPHYPOINT: 'GEOGRAPHYPOINT'>, <TokenType.PUT: 'PUT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.YEAR: 'YEAR'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.LIST: 'LIST'>, <TokenType.EXPORT: 'EXPORT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BLOB: 'BLOB'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.DIV: 'DIV'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.COPY: 'COPY'>, <TokenType.UINT: 'UINT'>, <TokenType.TOP: 'TOP'>, <TokenType.FALSE: 'FALSE'>, <TokenType.TIME: 'TIME'>, <TokenType.INT: 'INT'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.TRUE: 'TRUE'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NAME: 'NAME'>, <TokenType.CURRENT_SCHEMA: 'CURRENT_SCHEMA'>, <TokenType.SOME: 'SOME'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.VAR: 'VAR'>, <TokenType.SOURCE: 'SOURCE'>, <TokenType.SEMANTIC_VIEW: 'SEMANTIC_VIEW'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FILE_FORMAT: 'FILE_FORMAT'>, <TokenType.USE: 'USE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.ATTACH: 'ATTACH'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.POINT: 'POINT'>, <TokenType.NAMESPACE: 'NAMESPACE'>, 
<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.BIT: 'BIT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.END: 'END'>, <TokenType.UUID: 'UUID'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.INDEX: 'INDEX'>, <TokenType.ANY: 'ANY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TAG: 'TAG'>, <TokenType.INT128: 'INT128'>, <TokenType.GET: 'GET'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.NULL: 'NULL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DYNAMIC: 'DYNAMIC'>, <TokenType.CUBE: 'CUBE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SHOW: 'SHOW'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.JSON: 'JSON'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VOID: 'VOID'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.LINESTRING: 'LINESTRING'>, 
<TokenType.INT256: 'INT256'>, <TokenType.KILL: 'KILL'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SINK: 'SINK'>, <TokenType.LIMIT: 'LIMIT'>, <TokenType.SMALLDATETIME: 'SMALLDATETIME'>, <TokenType.RING: 'RING'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.IS: 'IS'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NOTHING: 'NOTHING'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.NEXT: 'NEXT'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DELETE: 'DELETE'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.XML: 'XML'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DETACH: 'DETACH'>, <TokenType.DATE: 'DATE'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ALL: 'ALL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.CASE: 'CASE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.EXISTS: 'EXISTS'>, 
<TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.INET: 'INET'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.MAP: 'MAP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STAGE: 'STAGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.UDOUBLE: 'UDOUBLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATETIME2: 'DATETIME2'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.RENAME: 'RENAME'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TRUNCATE: 'TRUNCATE'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.And'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONCAT_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayConcatAgg'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFirst'>>, 'ARRAY_INTERSECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayIntersect'>>, 'ARRAY_INTERSECTION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayIntersect'>>, 'ARRAY_LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayLast'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_REMOVE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayRemove'>>, 'ARRAY_REVERSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayReverse'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SLICE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySlice'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 
'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ascii'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'BIT_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseAndAgg'>>, 'BIT_COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseCountAgg'>>, 'BIT_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseOrAgg'>>, 'BIT_XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.BitwiseXorAgg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function 
Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CONVERT_TO_CHARSET': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConvertToCharset'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_SCHEMA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentSchema'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_TIMESTAMP_L_T_Z': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestampLTZ'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_date>, 'DATE_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'DATE_BIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateBin'>>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME': <function _build_datetime>, 'DATETIME_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DECODE_CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DecodeCase'>>, 
'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'ENDS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.EndsWith'>>, 'ENDSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.EndsWith'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FEATURES_AT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FeaturesAtTime'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.GenerateTimestampArray'>>, 'GET_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GetExtract'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_ASCII': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsAscii'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 
'J_S_O_N_B_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBObjectAgg'>>, 'J_S_O_N_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONCast'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'JSON_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONType'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function BigQuery.Parser.<lambda>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'CHAR_LENGTH': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.Length'>>, 'CHARACTER_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Or'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 
'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Replace'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Space'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'ST_DISTANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StDistance'>>, 'ST_POINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StPoint'>>, 'ST_MAKEPOINT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StPoint'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 
'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRTOK_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTRING_INDEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SubstringIndex'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <function _build_time>, 'TIME_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'TIME_TO_STR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <function _build_timestamp>, 'TIMESTAMP_ADD': <function build_date_delta_with_interval.<locals>._builder>, 'TIMESTAMPDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMPFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampFromParts'>>, 'TIMESTAMP_SUB': <function build_date_delta_with_interval.<locals>._builder>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToNumber'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'TYPEOF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Typeof'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNICODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unicode'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function 
build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'XMLELEMENT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLElement'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'STRPOS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'CHARINDEX': <function Parser.<lambda>>, 'INSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'LOCATE': <function Parser.<lambda>>, 'TO_HEX': <function _build_to_hex>, 'CONTAINS_SUBSTR': <function 
_build_contains_substring>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'EDIT_DISTANCE': <function _build_levenshtein>, 'FORMAT_DATE': <function _build_format_time.<locals>._builder>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'JSON_QUERY': <function build_extract_json_with_path.<locals>._builder>, 'JSON_QUERY_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_VALUE': <function _build_extract_json_with_default_path.<locals>._builder>, 'JSON_VALUE_ARRAY': <function _build_extract_json_with_default_path.<locals>._builder>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _build_parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_MICROS': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_MILLIS': <function BigQuery.Parser.<lambda>>, 'TIMESTAMP_SECONDS': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'FORMAT_DATETIME': <function _build_format_time.<locals>._builder>, 'FORMAT_TIMESTAMP': <function _build_format_time.<locals>._builder>}
FUNCTION_PARSERS = {'ARG_MAX': <function Parser.<dictcomp>.<lambda>>, 'ARGMAX': <function Parser.<dictcomp>.<lambda>>, 'MAX_BY': <function Parser.<dictcomp>.<lambda>>, 'ARG_MIN': <function Parser.<dictcomp>.<lambda>>, 'ARGMIN': <function Parser.<dictcomp>.<lambda>>, 'MIN_BY': <function Parser.<dictcomp>.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CEIL': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'FLOOR': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'XMLELEMENT': <function Parser.<lambda>>, 'XMLTABLE': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>, 'JSON_ARRAY': <function BigQuery.Parser.<lambda>>, 'MAKE_INTERVAL': <function BigQuery.Parser.<lambda>>, 'FEATURES_AT_TIME': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.NESTED: 'NESTED'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.MAP: 'MAP'>, <TokenType.UNION: 'UNION'>, <TokenType.RANGE: 'RANGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.LIST: 'LIST'>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'ENVIRONMENT': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 
'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WATERMARK': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'BUCKET': <function Parser.<lambda>>, 'TRUNCATE': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>}
NULL_TOKENS = {<TokenType.NULL: 'NULL'>, <TokenType.UNKNOWN: 'UNKNOWN'>}
DASHED_TABLE_PART_FOLLOW_TOKENS = {<TokenType.R_PAREN: 'R_PAREN'>, <TokenType.L_PAREN: 'L_PAREN'>, <TokenType.DOT: 'DOT'>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.ANALYZE: 'ANALYZE'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UNPIVOT: 'UNPIVOT'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.ELSE: 'ELSE'>: <function BigQuery.Parser.<lambda>>, <TokenType.END: 'END'>: <function BigQuery.Parser.<lambda>>, <TokenType.FOR: 'FOR'>: <function BigQuery.Parser.<lambda>>, <TokenType.EXPORT: 'EXPORT'>: <function BigQuery.Parser.<lambda>>, <TokenType.DECLARE: 'DECLARE'>: <function BigQuery.Parser.<lambda>>}
BRACKET_OFFSETS = {'OFFSET': (0, False), 'ORDINAL': (1, False), 'SAFE_OFFSET': (0, True), 'SAFE_ORDINAL': (1, True)}
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
STRUCT_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
COLON_PLACEHOLDER_TOKENS
ARRAY_CONSTRUCTORS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
TIMESTAMPS
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
CAST_COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
PIPE_SYNTAX_TRANSFORM_PARSERS
ALTER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
QUERY_MODIFIER_TOKENS
SET_PARSERS
SHOW_PARSERS
TYPE_LITERAL_PARSERS
TYPE_CONVERTERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
WINDOW_EXCLUDE_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
ANALYZE_STYLES
ANALYZE_EXPRESSION_PARSERS
PARTITION_KEYWORDS
AMBIGUOUS_ALIAS_TOKENS
OPERATION_MODIFIERS
RECURSIVE_CTE_SEARCH_KIND
MODIFIABLES
STRICT_CAST
IDENTIFY_PIVOT_STRINGS
TABLESAMPLE_CSV
DEFAULT_SAMPLING_METHOD
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
COLON_IS_VARIANT_EXTRACT
VALUES_FOLLOWED_BY_PAREN
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
WRAPPED_TRANSFORM_COLUMN_CONSTRAINT
OPTIONAL_ALIAS_TOKEN_CTE
ALTER_RENAME_REQUIRES_COLUMN
ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
MAP_KEYS_ARE_ARBITRARY_EXPRESSIONS
JSON_EXTRACT_REQUIRES_JSON_EXPRESSION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
parse_set_operation
build_cast
errors
sql
class BigQuery.Generator(sqlglot.generator.Generator):
 972    class Generator(generator.Generator):
 973        INTERVAL_ALLOWS_PLURAL_FORM = False
 974        JOIN_HINTS = False
 975        QUERY_HINTS = False
 976        TABLE_HINTS = False
 977        LIMIT_FETCH = "LIMIT"
 978        RENAME_TABLE_WITH_DB = False
 979        NVL2_SUPPORTED = False
 980        UNNEST_WITH_ORDINALITY = False
 981        COLLATE_IS_FUNC = True
 982        LIMIT_ONLY_LITERALS = True
 983        SUPPORTS_TABLE_ALIAS_COLUMNS = False
 984        UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
 985        JSON_KEY_VALUE_PAIR_SEP = ","
 986        NULL_ORDERING_SUPPORTED = False
 987        IGNORE_NULLS_IN_FUNC = True
 988        JSON_PATH_SINGLE_QUOTE_ESCAPE = True
 989        CAN_IMPLEMENT_ARRAY_ANY = True
 990        SUPPORTS_TO_NUMBER = False
 991        NAMED_PLACEHOLDER_TOKEN = "@"
 992        HEX_FUNC = "TO_HEX"
 993        WITH_PROPERTIES_PREFIX = "OPTIONS"
 994        SUPPORTS_EXPLODING_PROJECTIONS = False
 995        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
 996        SUPPORTS_UNIX_SECONDS = True
 997
 998        TRANSFORMS = {
 999            **generator.Generator.TRANSFORMS,
1000            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
1001            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
1002            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
1003            exp.Array: inline_array_unless_query,
1004            exp.ArrayContains: _array_contains_sql,
1005            exp.ArrayFilter: filter_array_using_unnest,
1006            exp.ArrayRemove: filter_array_using_unnest,
1007            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
1008            exp.CollateProperty: lambda self, e: (
1009                f"DEFAULT COLLATE {self.sql(e, 'this')}"
1010                if e.args.get("default")
1011                else f"COLLATE {self.sql(e, 'this')}"
1012            ),
1013            exp.Commit: lambda *_: "COMMIT TRANSACTION",
1014            exp.CountIf: rename_func("COUNTIF"),
1015            exp.Create: _create_sql,
1016            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
1017            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
1018            exp.DateDiff: lambda self, e: self.func(
1019                "DATE_DIFF", e.this, e.expression, unit_to_var(e)
1020            ),
1021            exp.DateFromParts: rename_func("DATE"),
1022            exp.DateStrToDate: datestrtodate_sql,
1023            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
1024            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
1025            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
1026            exp.FromTimeZone: lambda self, e: self.func(
1027                "DATETIME", self.func("TIMESTAMP", e.this, e.args.get("zone")), "'UTC'"
1028            ),
1029            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
1030            exp.GroupConcat: lambda self, e: groupconcat_sql(
1031                self, e, func_name="STRING_AGG", within_group=False
1032            ),
1033            exp.Hex: lambda self, e: self.func("UPPER", self.func("TO_HEX", self.sql(e, "this"))),
1034            exp.HexString: lambda self, e: self.hexstring_sql(e, binary_function_repr="FROM_HEX"),
1035            exp.If: if_sql(false_value="NULL"),
1036            exp.ILike: no_ilike_sql,
1037            exp.IntDiv: rename_func("DIV"),
1038            exp.Int64: rename_func("INT64"),
1039            exp.JSONExtract: _json_extract_sql,
1040            exp.JSONExtractArray: _json_extract_sql,
1041            exp.JSONExtractScalar: _json_extract_sql,
1042            exp.JSONFormat: rename_func("TO_JSON_STRING"),
1043            exp.Levenshtein: _levenshtein_sql,
1044            exp.Max: max_or_greatest,
1045            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
1046            exp.MD5Digest: rename_func("MD5"),
1047            exp.Min: min_or_least,
1048            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
1049            exp.RegexpExtract: lambda self, e: self.func(
1050                "REGEXP_EXTRACT",
1051                e.this,
1052                e.expression,
1053                e.args.get("position"),
1054                e.args.get("occurrence"),
1055            ),
1056            exp.RegexpExtractAll: lambda self, e: self.func(
1057                "REGEXP_EXTRACT_ALL", e.this, e.expression
1058            ),
1059            exp.RegexpReplace: regexp_replace_sql,
1060            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
1061            exp.ReturnsProperty: _returnsproperty_sql,
1062            exp.Rollback: lambda *_: "ROLLBACK TRANSACTION",
1063            exp.Select: transforms.preprocess(
1064                [
1065                    transforms.explode_projection_to_unnest(),
1066                    transforms.unqualify_unnest,
1067                    transforms.eliminate_distinct_on,
1068                    _alias_ordered_group,
1069                    transforms.eliminate_semi_and_anti_joins,
1070                ]
1071            ),
1072            exp.SHA: rename_func("SHA1"),
1073            exp.SHA2: sha256_sql,
1074            exp.Space: space_sql,
1075            exp.StabilityProperty: lambda self, e: (
1076                "DETERMINISTIC" if e.name == "IMMUTABLE" else "NOT DETERMINISTIC"
1077            ),
1078            exp.String: rename_func("STRING"),
1079            exp.StrPosition: lambda self, e: (
1080                strposition_sql(
1081                    self, e, func_name="INSTR", supports_position=True, supports_occurrence=True
1082                )
1083            ),
1084            exp.StrToDate: _str_to_datetime_sql,
1085            exp.StrToTime: _str_to_datetime_sql,
1086            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
1087            exp.TimeFromParts: rename_func("TIME"),
1088            exp.TimestampFromParts: rename_func("DATETIME"),
1089            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
1090            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
1091            exp.TimestampDiff: rename_func("TIMESTAMP_DIFF"),
1092            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
1093            exp.TimeStrToTime: timestrtotime_sql,
1094            exp.Transaction: lambda *_: "BEGIN TRANSACTION",
1095            exp.TsOrDsAdd: _ts_or_ds_add_sql,
1096            exp.TsOrDsDiff: _ts_or_ds_diff_sql,
1097            exp.TsOrDsToTime: rename_func("TIME"),
1098            exp.TsOrDsToDatetime: rename_func("DATETIME"),
1099            exp.TsOrDsToTimestamp: rename_func("TIMESTAMP"),
1100            exp.Unhex: rename_func("FROM_HEX"),
1101            exp.UnixDate: rename_func("UNIX_DATE"),
1102            exp.UnixToTime: _unix_to_time_sql,
1103            exp.Uuid: lambda *_: "GENERATE_UUID()",
1104            exp.Values: _derived_table_values_to_unnest,
1105            exp.VariancePop: rename_func("VAR_POP"),
1106            exp.SafeDivide: rename_func("SAFE_DIVIDE"),
1107        }
1108
1109        SUPPORTED_JSON_PATH_PARTS = {
1110            exp.JSONPathKey,
1111            exp.JSONPathRoot,
1112            exp.JSONPathSubscript,
1113        }
1114
1115        TYPE_MAPPING = {
1116            **generator.Generator.TYPE_MAPPING,
1117            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
1118            exp.DataType.Type.BIGINT: "INT64",
1119            exp.DataType.Type.BINARY: "BYTES",
1120            exp.DataType.Type.BLOB: "BYTES",
1121            exp.DataType.Type.BOOLEAN: "BOOL",
1122            exp.DataType.Type.CHAR: "STRING",
1123            exp.DataType.Type.DECIMAL: "NUMERIC",
1124            exp.DataType.Type.DOUBLE: "FLOAT64",
1125            exp.DataType.Type.FLOAT: "FLOAT64",
1126            exp.DataType.Type.INT: "INT64",
1127            exp.DataType.Type.NCHAR: "STRING",
1128            exp.DataType.Type.NVARCHAR: "STRING",
1129            exp.DataType.Type.SMALLINT: "INT64",
1130            exp.DataType.Type.TEXT: "STRING",
1131            exp.DataType.Type.TIMESTAMP: "DATETIME",
1132            exp.DataType.Type.TIMESTAMPNTZ: "DATETIME",
1133            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
1134            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
1135            exp.DataType.Type.TINYINT: "INT64",
1136            exp.DataType.Type.ROWVERSION: "BYTES",
1137            exp.DataType.Type.UUID: "STRING",
1138            exp.DataType.Type.VARBINARY: "BYTES",
1139            exp.DataType.Type.VARCHAR: "STRING",
1140            exp.DataType.Type.VARIANT: "ANY TYPE",
1141        }
1142
1143        PROPERTIES_LOCATION = {
1144            **generator.Generator.PROPERTIES_LOCATION,
1145            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
1146            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
1147        }
1148
1149        # WINDOW comes after QUALIFY
1150        # https://cloud.google.com/bigquery/docs/reference/standard-sql/query-syntax#window_clause
1151        AFTER_HAVING_MODIFIER_TRANSFORMS = {
1152            "qualify": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["qualify"],
1153            "windows": generator.Generator.AFTER_HAVING_MODIFIER_TRANSFORMS["windows"],
1154        }
1155
1156        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
1157        RESERVED_KEYWORDS = {
1158            "all",
1159            "and",
1160            "any",
1161            "array",
1162            "as",
1163            "asc",
1164            "assert_rows_modified",
1165            "at",
1166            "between",
1167            "by",
1168            "case",
1169            "cast",
1170            "collate",
1171            "contains",
1172            "create",
1173            "cross",
1174            "cube",
1175            "current",
1176            "default",
1177            "define",
1178            "desc",
1179            "distinct",
1180            "else",
1181            "end",
1182            "enum",
1183            "escape",
1184            "except",
1185            "exclude",
1186            "exists",
1187            "extract",
1188            "false",
1189            "fetch",
1190            "following",
1191            "for",
1192            "from",
1193            "full",
1194            "group",
1195            "grouping",
1196            "groups",
1197            "hash",
1198            "having",
1199            "if",
1200            "ignore",
1201            "in",
1202            "inner",
1203            "intersect",
1204            "interval",
1205            "into",
1206            "is",
1207            "join",
1208            "lateral",
1209            "left",
1210            "like",
1211            "limit",
1212            "lookup",
1213            "merge",
1214            "natural",
1215            "new",
1216            "no",
1217            "not",
1218            "null",
1219            "nulls",
1220            "of",
1221            "on",
1222            "or",
1223            "order",
1224            "outer",
1225            "over",
1226            "partition",
1227            "preceding",
1228            "proto",
1229            "qualify",
1230            "range",
1231            "recursive",
1232            "respect",
1233            "right",
1234            "rollup",
1235            "rows",
1236            "select",
1237            "set",
1238            "some",
1239            "struct",
1240            "tablesample",
1241            "then",
1242            "to",
1243            "treat",
1244            "true",
1245            "unbounded",
1246            "union",
1247            "unnest",
1248            "using",
1249            "when",
1250            "where",
1251            "window",
1252            "with",
1253            "within",
1254        }
1255
1256        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1257            unit = expression.unit
1258            unit_sql = unit.name if unit.is_string else self.sql(unit)
1259            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
1260
1261        def mod_sql(self, expression: exp.Mod) -> str:
1262            this = expression.this
1263            expr = expression.expression
1264            return self.func(
1265                "MOD",
1266                this.unnest() if isinstance(this, exp.Paren) else this,
1267                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1268            )
1269
1270        def column_parts(self, expression: exp.Column) -> str:
1271            if expression.meta.get("quoted_column"):
1272                # If a column reference is of the form `dataset.table`.name, we need
1273                # to preserve the quoted table path, otherwise the reference breaks
1274                table_parts = ".".join(p.name for p in expression.parts[:-1])
1275                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1276                return f"{table_path}.{self.sql(expression, 'this')}"
1277
1278            return super().column_parts(expression)
1279
1280        def table_parts(self, expression: exp.Table) -> str:
1281            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1282            # we need to make sure the correct quoting is used in each case.
1283            #
1284            # For example, if there is a CTE x that clashes with a schema name, then the former will
1285            # return the table y in that schema, whereas the latter will return the CTE's y column:
1286            #
1287            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1288            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1289            if expression.meta.get("quoted_table"):
1290                table_parts = ".".join(p.name for p in expression.parts)
1291                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1292
1293            return super().table_parts(expression)
1294
1295        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1296            this = expression.this
1297            if isinstance(this, exp.TsOrDsToDatetime):
1298                func_name = "FORMAT_DATETIME"
1299            elif isinstance(this, exp.TsOrDsToTimestamp):
1300                func_name = "FORMAT_TIMESTAMP"
1301            else:
1302                func_name = "FORMAT_DATE"
1303
1304            time_expr = (
1305                this
1306                if isinstance(this, (exp.TsOrDsToDatetime, exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
1307                else expression
1308            )
1309            return self.func(
1310                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1311            )
1312
1313        def eq_sql(self, expression: exp.EQ) -> str:
1314            # Operands of = cannot be NULL in BigQuery
1315            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1316                if not isinstance(expression.parent, exp.Update):
1317                    return "NULL"
1318
1319            return self.binary(expression, "=")
1320
1321        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1322            parent = expression.parent
1323
1324            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1325            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1326            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1327                return self.func(
1328                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1329                )
1330
1331            return super().attimezone_sql(expression)
1332
1333        def trycast_sql(self, expression: exp.TryCast) -> str:
1334            return self.cast_sql(expression, safe_prefix="SAFE_")
1335
1336        def bracket_sql(self, expression: exp.Bracket) -> str:
1337            this = expression.this
1338            expressions = expression.expressions
1339
1340            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
1341                arg = expressions[0]
1342                if arg.type is None:
1343                    from sqlglot.optimizer.annotate_types import annotate_types
1344
1345                    arg = annotate_types(arg, dialect=self.dialect)
1346
1347                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
1348                    # BQ doesn't support bracket syntax with string values for structs
1349                    return f"{self.sql(this)}.{arg.name}"
1350
1351            expressions_sql = self.expressions(expression, flat=True)
1352            offset = expression.args.get("offset")
1353
1354            if offset == 0:
1355                expressions_sql = f"OFFSET({expressions_sql})"
1356            elif offset == 1:
1357                expressions_sql = f"ORDINAL({expressions_sql})"
1358            elif offset is not None:
1359                self.unsupported(f"Unsupported array offset: {offset}")
1360
1361            if expression.args.get("safe"):
1362                expressions_sql = f"SAFE_{expressions_sql}"
1363
1364            return f"{self.sql(this)}[{expressions_sql}]"
1365
1366        def in_unnest_op(self, expression: exp.Unnest) -> str:
1367            return self.sql(expression)
1368
1369        def version_sql(self, expression: exp.Version) -> str:
1370            if expression.name == "TIMESTAMP":
1371                expression.set("this", "SYSTEM_TIME")
1372            return super().version_sql(expression)
1373
1374        def contains_sql(self, expression: exp.Contains) -> str:
1375            this = expression.this
1376            expr = expression.expression
1377
1378            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1379                this = this.this
1380                expr = expr.this
1381
1382            return self.func("CONTAINS_SUBSTR", this, expr)
1383
1384        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1385            this = expression.this
1386
1387            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
1388            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
1389            # because they aren't literals and so the above syntax is invalid BigQuery.
1390            if isinstance(this, exp.Array):
1391                elem = seq_get(this.expressions, 0)
1392                if not (elem and elem.find(exp.Query)):
1393                    return f"{self.sql(expression, 'to')}{self.sql(this)}"
1394
1395            return super().cast_sql(expression, safe_prefix=safe_prefix)
1396
1397        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1398            variables = self.expressions(expression, "this")
1399            default = self.sql(expression, "default")
1400            default = f" DEFAULT {default}" if default else ""
1401            kind = self.sql(expression, "kind")
1402            kind = f" {kind}" if kind else ""
1403
1404            return f"{variables}{kind}{default}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: Whether to preserve comments in the output SQL code. Default: True.
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
NVL2_SUPPORTED = False
UNNEST_WITH_ORDINALITY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
SUPPORTS_TABLE_ALIAS_COLUMNS = False
UNPIVOT_ALIASES_ARE_IDENTIFIERS = False
JSON_KEY_VALUE_PAIR_SEP = ','
NULL_ORDERING_SUPPORTED = False
IGNORE_NULLS_IN_FUNC = True
JSON_PATH_SINGLE_QUOTE_ESCAPE = True
CAN_IMPLEMENT_ARRAY_ANY = True
SUPPORTS_TO_NUMBER = False
NAMED_PLACEHOLDER_TOKEN = '@'
HEX_FUNC = 'TO_HEX'
WITH_PROPERTIES_PREFIX = 'OPTIONS'
SUPPORTS_EXPLODING_PROJECTIONS = False
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_UNIX_SECONDS = True
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeColumns'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AnalyzeWith'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Ceil'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConvertToCharset'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CredentialsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EnviromentProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Floor'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Get'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByBucket'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PartitionByTruncate'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PositionalColumn'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Put'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TableColumn'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Tags'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UsingData'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ForceProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArgMin'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.Array'>: <function inline_array_unless_query>, <class 'sqlglot.expressions.ArrayContains'>: <function _array_contains_sql>, <class 'sqlglot.expressions.ArrayFilter'>: <function filter_array_using_unnest>, <class 'sqlglot.expressions.ArrayRemove'>: <function filter_array_using_unnest>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CollateProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Commit'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.CountIf'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.FromTimeZone'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function BigQuery.Generator.<lambda>>, <class 
'sqlglot.expressions.HexString'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function _json_extract_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function _json_extract_sql>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function _levenshtein_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtractAll'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Rollback'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.SHA2'>: <function sha256_sql>, <class 'sqlglot.expressions.Space'>: <function space_sql>, <class 'sqlglot.expressions.String'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function _str_to_datetime_sql>, <class 
'sqlglot.expressions.StrToTime'>: <function _str_to_datetime_sql>, <class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampDiff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Transaction'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _ts_or_ds_add_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function _ts_or_ds_diff_sql>, <class 'sqlglot.expressions.TsOrDsToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDatetime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsToTimestamp'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixDate'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function _unix_to_time_sql>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.SafeDivide'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.DATETIME2: 'DATETIME2'>: 'TIMESTAMP', <Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.BLOB: 'BLOB'>: 'BYTES', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'BYTES', <Type.SMALLDATETIME: 'SMALLDATETIME'>: 'TIMESTAMP', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.UUID: 'UUID'>: 'STRING', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EncodeProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EnviromentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.IncludeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 
'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: 
<Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StorageHandlerProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Tags'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.UsingTemplateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, 
<class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ForceProperty'>: <Location.POST_CREATE: 'POST_CREATE'>}
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
RESERVED_KEYWORDS = {'true', 'else', 'assert_rows_modified', 'treat', 'where', 'ignore', 'when', 'current', 'between', 'respect', 'escape', 'select', 'like', 'new', 'window', 'end', 'join', 'and', 'recursive', 'null', 'some', 'limit', 'default', 'desc', 'exists', 'false', 'right', 'extract', 'on', 'if', 'qualify', 'from', 'except', 'left', 'within', 'any', 'groups', 'full', 'to', 'distinct', 'inner', 'exclude', 'set', 'case', 'all', 'of', 'cross', 'hash', 'define', 'interval', 'rollup', 'by', 'cast', 'cube', 'intersect', 'enum', 'merge', 'partition', 'for', 'grouping', 'having', 'collate', 'array', 'range', 'lookup', 'create', 'into', 'rows', 'over', 'nulls', 'no', 'or', 'natural', 'using', 'in', 'unbounded', 'as', 'group', 'lateral', 'unnest', 'fetch', 'preceding', 'tablesample', 'not', 'outer', 'at', 'union', 'then', 'proto', 'order', 'struct', 'with', 'is', 'asc', 'following', 'contains'}
def datetrunc_sql(self, expression: sqlglot.expressions.DateTrunc) -> str:
1256        def datetrunc_sql(self, expression: exp.DateTrunc) -> str:
1257            unit = expression.unit
1258            unit_sql = unit.name if unit.is_string else self.sql(unit)
1259            return self.func("DATE_TRUNC", expression.this, unit_sql, expression.args.get("zone"))
def mod_sql(self, expression: sqlglot.expressions.Mod) -> str:
1261        def mod_sql(self, expression: exp.Mod) -> str:
1262            this = expression.this
1263            expr = expression.expression
1264            return self.func(
1265                "MOD",
1266                this.unnest() if isinstance(this, exp.Paren) else this,
1267                expr.unnest() if isinstance(expr, exp.Paren) else expr,
1268            )
def column_parts(self, expression: sqlglot.expressions.Column) -> str:
1270        def column_parts(self, expression: exp.Column) -> str:
1271            if expression.meta.get("quoted_column"):
1272                # If a column reference is of the form `dataset.table`.name, we need
1273                # to preserve the quoted table path, otherwise the reference breaks
1274                table_parts = ".".join(p.name for p in expression.parts[:-1])
1275                table_path = self.sql(exp.Identifier(this=table_parts, quoted=True))
1276                return f"{table_path}.{self.sql(expression, 'this')}"
1277
1278            return super().column_parts(expression)
def table_parts(self, expression: sqlglot.expressions.Table) -> str:
1280        def table_parts(self, expression: exp.Table) -> str:
1281            # Depending on the context, `x.y` may not resolve to the same data source as `x`.`y`, so
1282            # we need to make sure the correct quoting is used in each case.
1283            #
1284            # For example, if there is a CTE x that clashes with a schema name, then the former will
1285            # return the table y in that schema, whereas the latter will return the CTE's y column:
1286            #
1287            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x.y`   -> cross join
1288            # - WITH x AS (SELECT [1, 2] AS y) SELECT * FROM x, `x`.`y` -> implicit unnest
1289            if expression.meta.get("quoted_table"):
1290                table_parts = ".".join(p.name for p in expression.parts)
1291                return self.sql(exp.Identifier(this=table_parts, quoted=True))
1292
1293            return super().table_parts(expression)
def timetostr_sql(self, expression: sqlglot.expressions.TimeToStr) -> str:
1295        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1296            this = expression.this
1297            if isinstance(this, exp.TsOrDsToDatetime):
1298                func_name = "FORMAT_DATETIME"
1299            elif isinstance(this, exp.TsOrDsToTimestamp):
1300                func_name = "FORMAT_TIMESTAMP"
1301            else:
1302                func_name = "FORMAT_DATE"
1303
1304            time_expr = (
1305                this
1306                if isinstance(this, (exp.TsOrDsToDatetime, exp.TsOrDsToTimestamp, exp.TsOrDsToDate))
1307                else expression
1308            )
1309            return self.func(
1310                func_name, self.format_time(expression), time_expr.this, expression.args.get("zone")
1311            )
def eq_sql(self, expression: sqlglot.expressions.EQ) -> str:
1313        def eq_sql(self, expression: exp.EQ) -> str:
1314            # Operands of = cannot be NULL in BigQuery
1315            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
1316                if not isinstance(expression.parent, exp.Update):
1317                    return "NULL"
1318
1319            return self.binary(expression, "=")
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
1321        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
1322            parent = expression.parent
1323
1324            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
1325            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
1326            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
1327                return self.func(
1328                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
1329                )
1330
1331            return super().attimezone_sql(expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
1333        def trycast_sql(self, expression: exp.TryCast) -> str:
1334            return self.cast_sql(expression, safe_prefix="SAFE_")
def bracket_sql(self, expression: sqlglot.expressions.Bracket) -> str:
1336        def bracket_sql(self, expression: exp.Bracket) -> str:
1337            this = expression.this
1338            expressions = expression.expressions
1339
1340            if len(expressions) == 1 and this and this.is_type(exp.DataType.Type.STRUCT):
1341                arg = expressions[0]
1342                if arg.type is None:
1343                    from sqlglot.optimizer.annotate_types import annotate_types
1344
1345                    arg = annotate_types(arg, dialect=self.dialect)
1346
1347                if arg.type and arg.type.this in exp.DataType.TEXT_TYPES:
1348                    # BQ doesn't support bracket syntax with string values for structs
1349                    return f"{self.sql(this)}.{arg.name}"
1350
1351            expressions_sql = self.expressions(expression, flat=True)
1352            offset = expression.args.get("offset")
1353
1354            if offset == 0:
1355                expressions_sql = f"OFFSET({expressions_sql})"
1356            elif offset == 1:
1357                expressions_sql = f"ORDINAL({expressions_sql})"
1358            elif offset is not None:
1359                self.unsupported(f"Unsupported array offset: {offset}")
1360
1361            if expression.args.get("safe"):
1362                expressions_sql = f"SAFE_{expressions_sql}"
1363
1364            return f"{self.sql(this)}[{expressions_sql}]"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
1366        def in_unnest_op(self, expression: exp.Unnest) -> str:
1367            return self.sql(expression)
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
1369        def version_sql(self, expression: exp.Version) -> str:
1370            if expression.name == "TIMESTAMP":
1371                expression.set("this", "SYSTEM_TIME")
1372            return super().version_sql(expression)
def contains_sql(self, expression: sqlglot.expressions.Contains) -> str:
1374        def contains_sql(self, expression: exp.Contains) -> str:
1375            this = expression.this
1376            expr = expression.expression
1377
1378            if isinstance(this, exp.Lower) and isinstance(expr, exp.Lower):
1379                this = this.this
1380                expr = expr.this
1381
1382            return self.func("CONTAINS_SUBSTR", this, expr)
def cast_sql(self, expression: sqlglot.expressions.Cast, safe_prefix: Optional[str] = None) -> str:
1384        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1385            this = expression.this
1386
1387            # This ensures that inline type-annotated ARRAY literals like ARRAY<INT64>[1, 2, 3]
1388            # are roundtripped unaffected. The inner check excludes ARRAY(SELECT ...) expressions,
1389            # because they aren't literals and so the above syntax is invalid BigQuery.
1390            if isinstance(this, exp.Array):
1391                elem = seq_get(this.expressions, 0)
1392                if not (elem and elem.find(exp.Query)):
1393                    return f"{self.sql(expression, 'to')}{self.sql(this)}"
1394
1395            return super().cast_sql(expression, safe_prefix=safe_prefix)
def declareitem_sql(self, expression: sqlglot.expressions.DeclareItem) -> str:
1397        def declareitem_sql(self, expression: exp.DeclareItem) -> str:
1398            variables = self.expressions(expression, "this")
1399            default = self.sql(expression, "default")
1400            default = f" DEFAULT {default}" if default else ""
1401            kind = self.sql(expression, "kind")
1402            kind = f" {kind}" if kind else ""
1403
1404            return f"{variables}{kind}{default}"
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
SUPPORTS_DECODE_CASE = False
Inherited Members
sqlglot.generator.Generator
Generator
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
SUPPORTS_TABLE_COPY
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
INSERT_OVERWRITE
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
SUPPORTS_WINDOW_EXCLUDE
SET_OP_MODIFIERS
COPY_PARAMS_ARE_WRAPPED
COPY_PARAMS_EQ_REQUIRED
COPY_HAS_INTO_KEYWORD
STAR_EXCEPT
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
ARRAY_CONCAT_IS_VAR_LEN
SUPPORTS_CONVERT_TIMEZONE
SUPPORTS_MEDIAN
ALTER_SET_WRAPPED
NORMALIZE_EXTRACT_DATE_PARTS
PARSE_JSON_NAME
ARRAY_SIZE_NAME
ALTER_SET_TYPE
ARRAY_SIZE_DIM_REQUIRED
SUPPORTS_BETWEEN_FLAGS
SUPPORTS_LIKE_QUANTIFIERS
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
EXPRESSION_PRECEDES_PROPERTIES_CREATABLES
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
RESPECT_IGNORE_NULLS_UNSUPPORTED_EXPRESSIONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
sanitize_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
describe_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
limitoptions_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
with_properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablefromrows_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
for_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
unnest_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
formatphrase_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterindex_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alterset_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
addpartition_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
safedivide_sql
overlaps_sql
distance_sql
dot_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
is_sql
like_sql
ilike_sql
similarto_sql
lt_sql
lte_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
jsoncast_sql
try_sql
log_sql
use_sql
binary
ceil_floor
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
whens_sql
merge_sql
tochar_sql
tonumber_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
uniquekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
struct_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonextractquote_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql
attach_sql
detach_sql
attachoption_sql
featuresattime_sql
watermarkcolumnconstraint_sql
encodeproperty_sql
includeproperty_sql
xmlelement_sql
xmlkeyvalueoption_sql
partitionbyrangeproperty_sql
partitionbyrangepropertydynamic_sql
unpivotcolumns_sql
analyzesample_sql
analyzestatistics_sql
analyzehistogram_sql
analyzedelete_sql
analyzelistchainedrows_sql
analyzevalidate_sql
analyze_sql
xmltable_sql
xmlnamespace_sql
export_sql
declare_sql
recursivewithsearch_sql
parameterizedagg_sql
anonymousaggfunc_sql
combinedaggfunc_sql
combinedparameterizedagg_sql
show_sql
get_put_sql
translatecharacters_sql
decodecase_sql
semanticview_sql
getextract_sql