Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25)
  26from sqlglot.helper import flatten, is_float, is_int, seq_get
  27from sqlglot.tokens import TokenType
  28
  29if t.TYPE_CHECKING:
  30    from sqlglot._typing import E
  31
  32
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a parser for Snowflake's TO_DATE/TO_TIME/TO_TIMESTAMP-style functions.

    The produced builder turns common, transpilable call shapes into dedicated
    expressions (casts, StrToTime, UnixToTime, ...) and falls back to an
    anonymous function call for everything else.

    Args:
        name: the Snowflake function name, used for the anonymous fallback.
        kind: the target data type of the conversion.
        safe: True for the TRY_* variants, which yield NULL instead of erroring.
    """
    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # NOTE: is_int inspects the literal's text, so a quoted integer like '5'
        # also counts as an integer value here
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value and not safe:
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Not a shape we know how to transpile: preserve the original call as-is
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  70
  71
  72def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  73    expression = parser.build_var_map(args)
  74
  75    if isinstance(expression, exp.StarMap):
  76        return expression
  77
  78    return exp.Struct(
  79        expressions=[
  80            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  81        ]
  82    )
  83
  84
  85def _build_datediff(args: t.List) -> exp.DateDiff:
  86    return exp.DateDiff(
  87        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  88    )
  89
  90
  91def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  92    def _builder(args: t.List) -> E:
  93        return expr_type(
  94            this=seq_get(args, 2),
  95            expression=seq_get(args, 1),
  96            unit=map_date_part(seq_get(args, 0)),
  97        )
  98
  99    return _builder
 100
 101
 102# https://docs.snowflake.com/en/sql-reference/functions/div0
 103def _build_if_from_div0(args: t.List) -> exp.If:
 104    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
 105        exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
 106    )
 107    true = exp.Literal.number(0)
 108    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
 109    return exp.If(this=cond, true=true, false=false)
 110
 111
 112# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 113def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 114    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 115    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 116
 117
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 122
 123
 124def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 125    flag = expression.text("flag")
 126
 127    if "i" not in flag:
 128        flag += "i"
 129
 130    return self.func(
 131        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 132    )
 133
 134
 135def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 136    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 137
 138    if not regexp_replace.args.get("replacement"):
 139        regexp_replace.set("replacement", exp.Literal.string(""))
 140
 141    return regexp_replace
 142
 143
 144def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 145    def _parse(self: Snowflake.Parser) -> exp.Show:
 146        return self._parse_show_snowflake(*args, **kwargs)
 147
 148    return _parse
 149
 150
 151def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 152    trunc = date_trunc_to_time(args)
 153    trunc.set("unit", map_date_part(trunc.args["unit"]))
 154    return trunc
 155
 156
 157def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 158    """
 159    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 160    so we need to unqualify them.
 161
 162    Example:
 163        >>> from sqlglot import parse_one
 164        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 165        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 166        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 167    """
 168    if isinstance(expression, exp.Pivot) and expression.unpivot:
 169        expression = transforms.unqualify_columns(expression)
 170
 171    return expression
 172
 173
 174def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 175    assert isinstance(expression, exp.Create)
 176
 177    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 178        if expression.this in exp.DataType.NESTED_TYPES:
 179            expression.set("expressions", None)
 180        return expression
 181
 182    props = expression.args.get("properties")
 183    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 184        for schema_expression in expression.this.expressions:
 185            if isinstance(schema_expression, exp.ColumnDef):
 186                column_type = schema_expression.kind
 187                if isinstance(column_type, exp.DataType):
 188                    column_type.transform(_flatten_structured_type, copy=False)
 189
 190    return expression
 191
 192
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite `UNNEST(GENERATE_DATE_ARRAY(start, end, step))` in FROM/JOIN clauses
    into a subquery built from ARRAY_GENERATE_RANGE + DATEADD, since Snowflake
    has no GENERATE_DATE_ARRAY function."""
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Only handle steps that are a single-unit interval (e.g. INTERVAL '1' DAY)
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                # Reuse the UNNEST alias's first column name for the generated values
                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 232
 233
class Snowflake(Dialect):
    """Snowflake dialect settings."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Maps Snowflake time-format tokens to strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 274
 275    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 276        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 277        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 278        if (
 279            isinstance(expression, exp.Identifier)
 280            and isinstance(expression.parent, exp.Table)
 281            and expression.name.lower() == "dual"
 282        ):
 283            return expression  # type: ignore
 284
 285        return super().quote_identifier(expression, identify=identify)
 286
    class Parser(parser.Parser):
        """Snowflake-specific parser configuration and overrides."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake functions that need custom parsing into sqlglot expressions
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # Maps the object kind after SHOW [TERSE] to its parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip the Cast wrappers added for typed lambda args (see _parse_lambda_arg)
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 470
 471        def _negate_range(
 472            self, this: t.Optional[exp.Expression] = None
 473        ) -> t.Optional[exp.Expression]:
 474            if not this:
 475                return this
 476
 477            query = this.args.get("query")
 478            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 479                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 480                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 481                # which can produce different results (most likely a SnowFlake bug).
 482                #
 483                # https://docs.snowflake.com/en/sql-reference/functions/in
 484                # Context: https://github.com/tobymao/sqlglot/issues/3890
 485                return self.expression(
 486                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 487                )
 488
 489            return self.expression(exp.Not, this=this)
 490
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH MASKING POLICY / PROJECTION POLICY / TAG column constraints.

            Returns None when none of the expected keyword sequences follow.
            """
            # The WITH keyword is optional; step the cursor back if it wasn't consumed
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 516
 517        def _parse_create(self) -> exp.Create | exp.Command:
 518            expression = super()._parse_create()
 519            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 520                # Replace the Table node with the enclosed Identifier
 521                expression.this.replace(expression.this.this)
 522
 523            return expression
 524
 525        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 526        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 527        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 528            this = self._parse_var() or self._parse_type()
 529
 530            if not this:
 531                return None
 532
 533            self._match(TokenType.COMMA)
 534            expression = self._parse_bitwise()
 535            this = map_date_part(this)
 536            name = this.name.upper()
 537
 538            if name.startswith("EPOCH"):
 539                if name == "EPOCH_MILLISECOND":
 540                    scale = 10**3
 541                elif name == "EPOCH_MICROSECOND":
 542                    scale = 10**6
 543                elif name == "EPOCH_NANOSECOND":
 544                    scale = 10**9
 545                else:
 546                    scale = None
 547
 548                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 549                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 550
 551                if scale:
 552                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 553
 554                return to_unix
 555
 556            return self.expression(exp.Extract, this=this, expression=expression)
 557
 558        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 559            if is_map:
 560                # Keys are strings in Snowflake's objects, see also:
 561                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 562                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 563                return self._parse_slice(self._parse_string())
 564
 565            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 566
 567        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 568            lateral = super()._parse_lateral()
 569            if not lateral:
 570                return lateral
 571
 572            if isinstance(lateral.this, exp.Explode):
 573                table_alias = lateral.args.get("alias")
 574                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 575                if table_alias and not table_alias.args.get("columns"):
 576                    table_alias.set("columns", columns)
 577                elif not table_alias:
 578                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 579
 580            return lateral
 581
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, additionally supporting staged file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @-prefixed path denotes a staged file location
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optionally parse `(FILE_FORMAT => ..., PATTERN => ...)` after the location
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 615
 616        def _parse_id_var(
 617            self,
 618            any_token: bool = True,
 619            tokens: t.Optional[t.Collection[TokenType]] = None,
 620        ) -> t.Optional[exp.Expression]:
 621            if self._match_text_seq("IDENTIFIER", "("):
 622                identifier = (
 623                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 624                    or self._parse_string()
 625                )
 626                self._match_r_paren()
 627                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 628
 629            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 630
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW statement into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: infer it from the object kind shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 668
 669        def _parse_location_property(self) -> exp.LocationProperty:
 670            self._match(TokenType.EQ)
 671            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 672
 673        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 674            # Parse either a subquery or a staged file
 675            return (
 676                self._parse_select(table=True, parse_subquery_alias=False)
 677                if self._match(TokenType.L_PAREN, advance=False)
 678                else self._parse_table_parts()
 679            )
 680
 681        def _parse_location_path(self) -> exp.Var:
 682            parts = [self._advance_any(ignore_reserved=True)]
 683
 684            # We avoid consuming a comma token because external tables like @foo and @bar
 685            # can be joined in a query with a comma separator, as well as closing paren
 686            # in case of subqueries
 687            while self._is_connected() and not self._match_set(
 688                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 689            ):
 690                parts.append(self._advance_any(ignore_reserved=True))
 691
 692            return exp.var("".join(part.text for part in parts if part))
 693
 694        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 695            this = super()._parse_lambda_arg()
 696
 697            if not this:
 698                return this
 699
 700            typ = self._parse_types()
 701
 702            if typ:
 703                return self.expression(exp.Cast, this=this, to=typ)
 704
 705            return this
 706
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer configuration."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Don't tokenize /*+ specially (i.e. as a hint opener)
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a statement (see Parser.STATEMENT_PARSERS), not a command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 749
    class Generator(generator.Generator):
        """Snowflake-specific SQL generator settings and expression renderers."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False

        # Maps sqlglot expression nodes to their Snowflake SQL renderings
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive, so +1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # VALUES rows containing these nodes can't use Snowflake's table-literal form
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 889
 890        def with_properties(self, properties: exp.Properties) -> str:
 891            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 892
 893        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 894            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 895                values_as_table = False
 896
 897            return super().values_sql(expression, values_as_table=values_as_table)
 898
        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing key-less structs to a bare OBJECT.

            Snowflake's structured-object syntax requires named fields; when a
            struct's inner expressions are plain DataTypes (no key names), that
            syntax can't be produced, so a plain OBJECT is emitted instead.
            """
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)
 910
 911        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 912            return self.func(
 913                "TO_NUMBER",
 914                expression.this,
 915                expression.args.get("format"),
 916                expression.args.get("precision"),
 917                expression.args.get("scale"),
 918            )
 919
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding milliseconds into nanoseconds.

            The "milli" argument has no slot in the generated function call, so
            it is multiplied by 1e6 and moved into the "nano" slot beforehand.
            Note: this mutates the expression in place.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 927
 928        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 929            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 930                return self.func("TO_GEOGRAPHY", expression.this)
 931            if expression.is_type(exp.DataType.Type.GEOMETRY):
 932                return self.func("TO_GEOMETRY", expression.this)
 933
 934            return super().cast_sql(expression, safe_prefix=safe_prefix)
 935
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to CAST for non-string operands.

            Snowflake's TRY_CAST only accepts string inputs, so the operand's
            type is annotated (if unknown) and non-text operands are emitted
            as a plain CAST instead.
            """
            value = expression.this

            if value.type is None:
                # Local import to avoid a circular dependency with the optimizer
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
 949
 950        def log_sql(self, expression: exp.Log) -> str:
 951            if not expression.expression:
 952                return self.func("LN", expression.this)
 953
 954            return super().log_sql(expression)
 955
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's ``TABLE(FLATTEN(INPUT => ...))``.

            FLATTEN exposes a fixed six-column output (seq, key, path, index,
            value, this), so a full six-column alias is always built: an
            explicit UNNEST offset takes the INDEX slot, and the first
            user-supplied alias column names the VALUE slot.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET maps onto FLATTEN's INDEX column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The caller's alias column (if any) names the VALUE column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 979
        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement with its optional Snowflake clauses.

            Clause order in the output is: TERSE, object kind, HISTORY, LIKE,
            IN <scope_kind> <scope>, STARTS WITH, LIMIT, FROM.
            """
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1004
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render RegexpExtract as REGEXP_SUBSTR with positional defaults.

            REGEXP_SUBSTR's optional arguments are positional, so when a later
            argument (e.g. group) is present, all earlier ones must be filled
            with their documented defaults; the chained ``or`` expressions
            below only materialize the defaults that are actually needed.
            """
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1028
1029        def describe_sql(self, expression: exp.Describe) -> str:
1030            # Default to table if kind is unknown
1031            kind_value = expression.args.get("kind") or "TABLE"
1032            kind = f" {kind_value}" if kind_value else ""
1033            this = f" {self.sql(expression, 'this')}"
1034            expressions = self.expressions(expression, flat=True)
1035            expressions = f" {expressions}" if expressions else ""
1036            return f"DESCRIBE{kind}{this}{expressions}"
1037
1038        def generatedasidentitycolumnconstraint_sql(
1039            self, expression: exp.GeneratedAsIdentityColumnConstraint
1040        ) -> str:
1041            start = expression.args.get("start")
1042            start = f" START {start}" if start else ""
1043            increment = expression.args.get("increment")
1044            increment = f" INCREMENT {increment}" if increment else ""
1045            return f"AUTOINCREMENT{start}{increment}"
1046
1047        def cluster_sql(self, expression: exp.Cluster) -> str:
1048            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1049
        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as ``OBJECT_CONSTRUCT(k1, v1, k2, v2, ...)``.

            Named entries (PropertyEQ) contribute their name as a string key;
            positional entries get synthetic ``"_<index>"`` keys.
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    # Identifier keys are turned into string literals; other key
                    # expressions are passed through as-is
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            # Interleave keys and values into OBJECT_CONSTRUCT's flat arg list
            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1065
        # APPROX_PERCENTILE has no weight/accuracy arguments in Snowflake
        @generator.unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render ApproxQuantile as APPROX_PERCENTILE(expr, quantile)."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1069
        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with Snowflake's stage/tag option groups.

            Output shape: ``SET <exprs> [STAGE_FILE_FORMAT = (...)]
            [STAGE_COPY_OPTIONS = (...)] [TAG ...]``.
            """
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
1081
1082        def strtotime_sql(self, expression: exp.StrToTime):
1083            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1084            return self.func(
1085                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1086            )
class Snowflake(Dialect):
    """Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Snowflake format tokens mapped to strftime-style tokens
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 275
 276    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 277        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 278        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 279        if (
 280            isinstance(expression, exp.Identifier)
 281            and isinstance(expression.parent, exp.Table)
 282            and expression.name.lower() == "dual"
 283        ):
 284            return expression  # type: ignore
 285
 286        return super().quote_identifier(expression, identify=identify)
 287
    class Parser(parser.Parser):
        """Snowflake-specific parser settings and function builders."""

        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # ``col:path`` extracts from VARIANT values
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped to sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array) in Snowflake, reversed vs exp.ArrayContains
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM has no special argument syntax in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Fixed output columns of Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip the Cast wrapper that typed lambda args are parsed into
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 471
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing NOT IN (subquery)."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
 491
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH-prefixed column constraints: MASKING POLICY,
            PROJECTION POLICY and TAG.

            The WITH keyword is optional; when the trigger token was not WITH,
            the cursor is rewound one token so the keyword itself (MASKING,
            PROJECTION, TAG) can be re-matched below.
            """
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Qualified policy names are kept as dotted expressions
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 517
 518        def _parse_create(self) -> exp.Create | exp.Command:
 519            expression = super()._parse_create()
 520            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 521                # Replace the Table node with the enclosed Identifier
 522                expression.this.replace(expression.this.this)
 523
 524            return expression
 525
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), normalizing EPOCH_* parts.

            EPOCH-style parts are rewritten to a TimeToUnix over the operand
            cast to TIMESTAMP, scaled per the requested unit; all other parts
            become a plain Extract.
            """
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Scale factors relative to epoch seconds
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
 558
 559        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 560            if is_map:
 561                # Keys are strings in Snowflake's objects, see also:
 562                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 563                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 564                return self._parse_slice(self._parse_string())
 565
 566            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 567
 568        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 569            lateral = super()._parse_lateral()
 570            if not lateral:
 571                return lateral
 572
 573            if isinstance(lateral.this, exp.Explode):
 574                table_alias = lateral.args.get("alias")
 575                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 576                if table_alias and not table_alias.args.get("columns"):
 577                    table_alias.set("columns", columns)
 578                elif not table_alias:
 579                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 580
 581            return lateral
 582
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file references.

            Supports `'path'` string literals and `@stage/path` locations in
            addition to ordinary table names.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string literal or a named format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 616
 617        def _parse_id_var(
 618            self,
 619            any_token: bool = True,
 620            tokens: t.Optional[t.Collection[TokenType]] = None,
 621        ) -> t.Optional[exp.Expression]:
 622            if self._match_text_seq("IDENTIFIER", "("):
 623                identifier = (
 624                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 625                    or self._parse_string()
 626                )
 627                self._match_r_paren()
 628                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 629
 630            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 631
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW statement for object kind `this`."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: infer SCHEMA vs TABLE from the
                    # kind of object being shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            # NOTE: the order of the dict values matters — STARTS WITH, LIMIT and
            # FROM are consumed from the token stream while building the args
            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 669
 670        def _parse_location_property(self) -> exp.LocationProperty:
 671            self._match(TokenType.EQ)
 672            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 673
 674        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 675            # Parse either a subquery or a staged file
 676            return (
 677                self._parse_select(table=True, parse_subquery_alias=False)
 678                if self._match(TokenType.L_PAREN, advance=False)
 679                else self._parse_table_parts()
 680            )
 681
 682        def _parse_location_path(self) -> exp.Var:
 683            parts = [self._advance_any(ignore_reserved=True)]
 684
 685            # We avoid consuming a comma token because external tables like @foo and @bar
 686            # can be joined in a query with a comma separator, as well as closing paren
 687            # in case of subqueries
 688            while self._is_connected() and not self._match_set(
 689                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 690            ):
 691                parts.append(self._advance_any(ignore_reserved=True))
 692
 693            return exp.var("".join(part.text for part in parts if part))
 694
 695        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 696            this = super()._parse_lambda_arg()
 697
 698            if not this:
 699                return this
 700
 701            typ = self._parse_types()
 702
 703            if typ:
 704                return self.expression(exp.Cast, this=this, to=typ)
 705
 706            return this
 707
    class Tokenizer(tokens.Tokenizer):
        # Lexer settings for Snowflake's syntax quirks.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Snowflake has no hint-comment syntax
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed structurally rather than treated as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 750
    class Generator(generator.Generator):
        # SQL generation settings specific to Snowflake.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
 772
        # Per-expression overrides mapping sqlglot AST nodes to Snowflake SQL.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
 865
        # JSON path node types Snowflake's GET_PATH syntax can express.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        # Snowflake renders nested/struct types as the semi-structured OBJECT type.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Constructs that cannot appear inside a VALUES (...) list; see values_sql.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 890
        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake renders properties unwrapped (no parens), space-separated
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 893
 894        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 895            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 896                values_as_table = False
 897
 898            return super().values_sql(expression, values_as_table=values_as_table)
 899
 900        def datatype_sql(self, expression: exp.DataType) -> str:
 901            expressions = expression.expressions
 902            if (
 903                expressions
 904                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 905                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 906            ):
 907                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 908                return "OBJECT"
 909
 910            return super().datatype_sql(expression)
 911
 912        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 913            return self.func(
 914                "TO_NUMBER",
 915                expression.this,
 916                expression.args.get("format"),
 917                expression.args.get("precision"),
 918                expression.args.get("scale"),
 919            )
 920
 921        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 922            milli = expression.args.get("milli")
 923            if milli is not None:
 924                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 925                expression.set("nano", milli_to_nano)
 926
 927            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 928
 929        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 930            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 931                return self.func("TO_GEOGRAPHY", expression.this)
 932            if expression.is_type(exp.DataType.Type.GEOMETRY):
 933                return self.func("TO_GEOMETRY", expression.this)
 934
 935            return super().cast_sql(expression, safe_prefix=safe_prefix)
 936
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to CAST for non-string inputs."""
            value = expression.this

            if value.type is None:
                # Infer the input's type so we can decide whether TRY_CAST applies
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
 950
 951        def log_sql(self, expression: exp.Log) -> str:
 952            if not expression.expression:
 953                return self.func("LN", expression.this)
 954
 955            return super().log_sql(expression)
 956
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST via TABLE(FLATTEN(INPUT => ...)).

            FLATTEN's output has a fixed set of columns (seq, key, path, index,
            value, this), so the alias's column list is rebuilt to match them
            positionally.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit OFFSET alias replaces the default "index" column name
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied alias column names the "value" column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 980
 981        def show_sql(self, expression: exp.Show) -> str:
 982            terse = "TERSE " if expression.args.get("terse") else ""
 983            history = " HISTORY" if expression.args.get("history") else ""
 984            like = self.sql(expression, "like")
 985            like = f" LIKE {like}" if like else ""
 986
 987            scope = self.sql(expression, "scope")
 988            scope = f" {scope}" if scope else ""
 989
 990            scope_kind = self.sql(expression, "scope_kind")
 991            if scope_kind:
 992                scope_kind = f" IN {scope_kind}"
 993
 994            starts_with = self.sql(expression, "starts_with")
 995            if starts_with:
 996                starts_with = f" STARTS WITH {starts_with}"
 997
 998            limit = self.sql(expression, "limit")
 999
1000            from_ = self.sql(expression, "from")
1001            if from_:
1002                from_ = f" FROM {from_}"
1003
1004            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1005
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in positional defaults as needed."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # Each earlier positional arg must be present if a later one is, so
            # defaults cascade right-to-left: group -> parameters -> occurrence -> position
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1029
1030        def describe_sql(self, expression: exp.Describe) -> str:
1031            # Default to table if kind is unknown
1032            kind_value = expression.args.get("kind") or "TABLE"
1033            kind = f" {kind_value}" if kind_value else ""
1034            this = f" {self.sql(expression, 'this')}"
1035            expressions = self.expressions(expression, flat=True)
1036            expressions = f" {expressions}" if expressions else ""
1037            return f"DESCRIBE{kind}{this}{expressions}"
1038
1039        def generatedasidentitycolumnconstraint_sql(
1040            self, expression: exp.GeneratedAsIdentityColumnConstraint
1041        ) -> str:
1042            start = expression.args.get("start")
1043            start = f" START {start}" if start else ""
1044            increment = expression.args.get("increment")
1045            increment = f" INCREMENT {increment}" if increment else ""
1046            return f"AUTOINCREMENT{start}{increment}"
1047
1048        def cluster_sql(self, expression: exp.Cluster) -> str:
1049            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1050
1051        def struct_sql(self, expression: exp.Struct) -> str:
1052            keys = []
1053            values = []
1054
1055            for i, e in enumerate(expression.expressions):
1056                if isinstance(e, exp.PropertyEQ):
1057                    keys.append(
1058                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1059                    )
1060                    values.append(e.expression)
1061                else:
1062                    keys.append(exp.Literal.string(f"_{i}"))
1063                    values.append(e)
1064
1065            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1066
        @generator.unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            # Snowflake's equivalent is APPROX_PERCENTILE(expr, quantile)
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1070
        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with optional stage file-format/copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
1082
        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            # TRY_TO_TIMESTAMP for safe (error-tolerant) parses, TO_TIMESTAMP otherwise
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
276    def quote_identifier(self, expression: E, identify: bool = True) -> E:
277        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
278        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
279        if (
280            isinstance(expression, exp.Identifier)
281            and isinstance(expression.parent, exp.Table)
282            and expression.name.lower() == "dual"
283        ):
284            return expression  # type: ignore
285
286        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\n`) to its unescaped version (a literal newline).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        # Parser behavior flags; see parser.Parser for the full semantics of each.
        IDENTIFY_PIVOT_STRINGS = True
        # Default method used for TABLESAMPLE when none is specified.
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `expr:path` is parsed as VARIANT extraction rather than a slice.
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION may also appear where an identifier is expected.
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW is a valid table alias in Snowflake, but MATCH_CONDITION is not.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Maps Snowflake function names to builders for sqlglot expression nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Argument order is swapped relative to the incoming call:
            # args[1] becomes `this`, args[0] becomes `expression`.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH are parsed with binary=True (byte-length semantics flag).
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is normalized to PERCENTILE_CONT(x, 0.5).
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                parameters=seq_get(args, 4),
                # Group defaults to 0 (the whole match) when not supplied.
                group=seq_get(args, 5) or exp.Literal.number(0),
            ),
            "RLIKE": exp.RegexpLike.from_arg_list,
            # SQUARE(x) is normalized to POW(x, 2).
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Remove the special TRIM handler so TRIM is parsed as an ordinary function call.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is excluded from the timestamp token set here.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] name, ... is represented as a SET with unset=True.
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW subcommands; TERSE variants map to the same parser as their plain forms.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # WITH/MASKING/PROJECTION/TAG all funnel into _parse_with_constraint, which
        # retreats one token when the keyword consumed was not WITH.
        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single-character tokens that may appear inside staged file paths (@stage/...).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default column names produced by LATERAL FLATTEN(...).
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose unqualified IN scope is a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose name should be unwrapped from a Table node to an Identifier.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            # Lambda parameters may carry inline types (parsed as Casts by
            # _parse_lambda_arg); strip the Cast wrapper off the parameter list.
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
471
472        def _negate_range(
473            self, this: t.Optional[exp.Expression] = None
474        ) -> t.Optional[exp.Expression]:
475            if not this:
476                return this
477
478            query = this.args.get("query")
479            if isinstance(this, exp.In) and isinstance(query, exp.Query):
480                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
481                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
482                # which can produce different results (most likely a SnowFlake bug).
483                #
484                # https://docs.snowflake.com/en/sql-reference/functions/in
485                # Context: https://github.com/tobymao/sqlglot/issues/3890
486                return self.expression(
487                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
488                )
489
490            return self.expression(exp.Not, this=this)
491
492        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
493            if self._prev.token_type != TokenType.WITH:
494                self._retreat(self._index - 1)
495
496            if self._match_text_seq("MASKING", "POLICY"):
497                policy = self._parse_column()
498                return self.expression(
499                    exp.MaskingPolicyColumnConstraint,
500                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
501                    expressions=self._match(TokenType.USING)
502                    and self._parse_wrapped_csv(self._parse_id_var),
503                )
504            if self._match_text_seq("PROJECTION", "POLICY"):
505                policy = self._parse_column()
506                return self.expression(
507                    exp.ProjectionPolicyColumnConstraint,
508                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
509                )
510            if self._match(TokenType.TAG):
511                return self.expression(
512                    exp.TagColumnConstraint,
513                    expressions=self._parse_wrapped_csv(self._parse_property),
514                )
515
516            return None
517
518        def _parse_create(self) -> exp.Create | exp.Command:
519            expression = super()._parse_create()
520            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
521                # Replace the Table node with the enclosed Identifier
522                expression.this.replace(expression.this.this)
523
524            return expression
525
526        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
527        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
528        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
529            this = self._parse_var() or self._parse_type()
530
531            if not this:
532                return None
533
534            self._match(TokenType.COMMA)
535            expression = self._parse_bitwise()
536            this = map_date_part(this)
537            name = this.name.upper()
538
539            if name.startswith("EPOCH"):
540                if name == "EPOCH_MILLISECOND":
541                    scale = 10**3
542                elif name == "EPOCH_MICROSECOND":
543                    scale = 10**6
544                elif name == "EPOCH_NANOSECOND":
545                    scale = 10**9
546                else:
547                    scale = None
548
549                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
550                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
551
552                if scale:
553                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
554
555                return to_unix
556
557            return self.expression(exp.Extract, this=this, expression=expression)
558
559        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
560            if is_map:
561                # Keys are strings in Snowflake's objects, see also:
562                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
563                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
564                return self._parse_slice(self._parse_string())
565
566            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
567
568        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
569            lateral = super()._parse_lateral()
570            if not lateral:
571                return lateral
572
573            if isinstance(lateral.this, exp.Explode):
574                table_alias = lateral.args.get("alias")
575                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
576                if table_alias and not table_alias.args.get("columns"):
577                    table_alias.set("columns", columns)
578                elif not table_alias:
579                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
580
581            return lateral
582
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file references.

            A quoted string or an `@stage/path` location is treated as a staged file,
            optionally followed by `(FILE_FORMAT => ..., PATTERN => ...)` options;
            anything else falls through to the default table parsing.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Note: the condition order matters — `self._match(R_PAREN)` consumes
                # the paren and must only run once a wrapping L_PAREN was matched.
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string literal or a named format object.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
616
617        def _parse_id_var(
618            self,
619            any_token: bool = True,
620            tokens: t.Optional[t.Collection[TokenType]] = None,
621        ) -> t.Optional[exp.Expression]:
622            if self._match_text_seq("IDENTIFIER", "("):
623                identifier = (
624                    super()._parse_id_var(any_token=any_token, tokens=tokens)
625                    or self._parse_string()
626                )
627                self._match_r_paren()
628                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
629
630            return super()._parse_id_var(any_token=any_token, tokens=tokens)
631
632        def _parse_show_snowflake(self, this: str) -> exp.Show:
633            scope = None
634            scope_kind = None
635
636            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
637            # which is syntactically valid but has no effect on the output
638            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
639
640            history = self._match_text_seq("HISTORY")
641
642            like = self._parse_string() if self._match(TokenType.LIKE) else None
643
644            if self._match(TokenType.IN):
645                if self._match_text_seq("ACCOUNT"):
646                    scope_kind = "ACCOUNT"
647                elif self._match_set(self.DB_CREATABLES):
648                    scope_kind = self._prev.text.upper()
649                    if self._curr:
650                        scope = self._parse_table_parts()
651                elif self._curr:
652                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
653                    scope = self._parse_table_parts()
654
655            return self.expression(
656                exp.Show,
657                **{
658                    "terse": terse,
659                    "this": this,
660                    "history": history,
661                    "like": like,
662                    "scope": scope,
663                    "scope_kind": scope_kind,
664                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
665                    "limit": self._parse_limit(),
666                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
667                },
668            )
669
670        def _parse_location_property(self) -> exp.LocationProperty:
671            self._match(TokenType.EQ)
672            return self.expression(exp.LocationProperty, this=self._parse_location_path())
673
674        def _parse_file_location(self) -> t.Optional[exp.Expression]:
675            # Parse either a subquery or a staged file
676            return (
677                self._parse_select(table=True, parse_subquery_alias=False)
678                if self._match(TokenType.L_PAREN, advance=False)
679                else self._parse_table_parts()
680            )
681
682        def _parse_location_path(self) -> exp.Var:
683            parts = [self._advance_any(ignore_reserved=True)]
684
685            # We avoid consuming a comma token because external tables like @foo and @bar
686            # can be joined in a query with a comma separator, as well as closing paren
687            # in case of subqueries
688            while self._is_connected() and not self._match_set(
689                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
690            ):
691                parts.append(self._advance_any(ignore_reserved=True))
692
693            return exp.var("".join(part.text for part in parts if part))
694
695        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
696            this = super()._parse_lambda_arg()
697
698            if not this:
699                return this
700
701            typ = self._parse_types()
702
703            if typ:
704                return self.expression(exp.Cast, this=this, to=typ)
705
706            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.LINESTRING: 'LINESTRING'>, <TokenType.ALL: 'ALL'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.END: 'END'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.BIT: 'BIT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SET: 'SET'>, <TokenType.MONEY: 'MONEY'>, <TokenType.NULL: 'NULL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.RING: 'RING'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.DELETE: 'DELETE'>, <TokenType.INT256: 'INT256'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.KILL: 'KILL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.ROW: 'ROW'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.LEFT: 'LEFT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IS: 'IS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.COPY: 'COPY'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.MAP: 'MAP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.TEMPORARY: 
'TEMPORARY'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UINT: 'UINT'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.INT: 'INT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.DESC: 'DESC'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.ANTI: 'ANTI'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.JSON: 'JSON'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.FULL: 'FULL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.ASOF: 'ASOF'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.RENAME: 'RENAME'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.VAR: 'VAR'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FALSE: 'FALSE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CUBE: 'CUBE'>, <TokenType.TIME: 'TIME'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TINYINT: 'TINYINT'>, 
<TokenType.REFERENCES: 'REFERENCES'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DATE: 'DATE'>, <TokenType.ANY: 'ANY'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.XML: 'XML'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.DIV: 'DIV'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.NAME: 'NAME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ASC: 'ASC'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.LIST: 'LIST'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT128: 'INT128'>, <TokenType.ROWS: 'ROWS'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.VIEW: 'VIEW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.EXECUTE: 
'EXECUTE'>, <TokenType.INET: 'INET'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IPV6: 'IPV6'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.SOME: 'SOME'>, <TokenType.TAG: 'TAG'>, <TokenType.ENUM: 'ENUM'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.POINT: 'POINT'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.CASE: 'CASE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TABLE: 'TABLE'>, <TokenType.USE: 'USE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UINT128: 'UINT128'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.APPLY: 'APPLY'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>}
TABLE_ALIAS_TOKENS = {<TokenType.LINESTRING: 'LINESTRING'>, <TokenType.ALL: 'ALL'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.IPV4: 'IPV4'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.END: 'END'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.BIT: 'BIT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SET: 'SET'>, <TokenType.MONEY: 'MONEY'>, <TokenType.NULL: 'NULL'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UINT256: 'UINT256'>, <TokenType.RING: 'RING'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.DELETE: 'DELETE'>, <TokenType.INT256: 'INT256'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.ROW: 'ROW'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.IS: 'IS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.COPY: 'COPY'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.MAP: 'MAP'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UINT: 'UINT'>, 
<TokenType.VECTOR: 'VECTOR'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.JSONB: 'JSONB'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.INT: 'INT'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.DESC: 'DESC'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.ANTI: 'ANTI'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.JSON: 'JSON'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.RENAME: 'RENAME'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.VAR: 'VAR'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FALSE: 'FALSE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CUBE: 'CUBE'>, <TokenType.TIME: 'TIME'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DATE: 'DATE'>, 
<TokenType.ANY: 'ANY'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.XML: 'XML'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.DIV: 'DIV'>, <TokenType.NAME: 'NAME'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ASC: 'ASC'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.LIST: 'LIST'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT128: 'INT128'>, <TokenType.ROWS: 'ROWS'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.VIEW: 'VIEW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.INET: 'INET'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IPV6: 'IPV6'>, <TokenType.FIRST: 'FIRST'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.SOME: 'SOME'>, <TokenType.TAG: 
'TAG'>, <TokenType.ENUM: 'ENUM'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.POINT: 'POINT'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.CASE: 'CASE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.TABLE: 'TABLE'>, <TokenType.USE: 'USE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UINT128: 'UINT128'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.KILL: 'KILL'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 
'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 
'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 
'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function Snowflake.Parser.<lambda>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function 
_build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'SEQUENCES', 'TABLES', 'OBJECTS', 'IMPORTED KEYS', 'VIEWS', 'UNIQUE KEYS'}
NON_TABLE_CREATABLES = {'STORAGE INTEGRATION', 'WAREHOUSE', 'TAG', 'STREAMLIT'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
class Tokenizer(tokens.Tokenizer):
    """Snowflake-specific tokenizer settings layered on the base tokenizer."""

    # Snowflake accepts both a backslash and a doubled single quote as
    # string escapes, e.g. 'it''s' and 'it\'s'.
    STRING_ESCAPES = ["\\", "'"]
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # Dollar-quoted strings ($$ ... $$) are taken verbatim, no escaping.
    RAW_STRINGS = ["$$"]
    # Snowflake additionally supports // single-line comments.
    COMMENTS = ["--", "//", ("/*", "*/")]
    NESTED_COMMENTS = False

    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "EXCLUDE": TokenType.EXCEPT,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_CONDITION": TokenType.MATCH_CONDITION,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        "PUT": TokenType.COMMAND,
        "REMOVE": TokenType.COMMAND,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TAG": TokenType.TAG,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TOP": TokenType.TOP,
        "WAREHOUSE": TokenType.WAREHOUSE,
        "STREAMLIT": TokenType.STREAMLIT,
    }
    # Snowflake has no /*+ ... */ optimizer-hint syntax, so drop the
    # hint-comment opener inherited from the base keyword table.
    KEYWORDS.pop("/*+")

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        # $ introduces session variables / staged-file references.
        "$": TokenType.PARAMETER,
    }

    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is parsed as a real statement in this dialect, not an opaque command.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 
'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': 
<TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.RENAME: 'RENAME'>, <TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>}
class Snowflake.Generator(sqlglot.generator.Generator):
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect.

        The class-level flags below override base-generator defaults to match
        Snowflake's syntax; the mapping tables route expression nodes, data
        types and properties to their Snowflake-specific renderings.
        """

        # Snowflake session/bind parameters are written as $name.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE.
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False

        # Expression-node -> SQL renderer, extending the base generator's map.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # NOTE: ARRAY_CONTAINS takes (value, array) — arguments are swapped.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive, hence the +1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        # JSON path parts that Snowflake's path syntax can express.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Constructs that can't appear inside a VALUES table; see values_sql.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 890
 891        def with_properties(self, properties: exp.Properties) -> str:
 892            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 893
 894        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 895            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 896                values_as_table = False
 897
 898            return super().values_sql(expression, values_as_table=values_as_table)
 899
 900        def datatype_sql(self, expression: exp.DataType) -> str:
 901            expressions = expression.expressions
 902            if (
 903                expressions
 904                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 905                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 906            ):
 907                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 908                return "OBJECT"
 909
 910            return super().datatype_sql(expression)
 911
 912        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 913            return self.func(
 914                "TO_NUMBER",
 915                expression.this,
 916                expression.args.get("format"),
 917                expression.args.get("precision"),
 918                expression.args.get("scale"),
 919            )
 920
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            # Snowflake's TIMESTAMP_FROM_PARTS takes nanoseconds, not milliseconds,
            # so a parsed `milli` argument is converted (1 ms = 1_000_000 ns) and
            # moved into the `nano` slot; pop() detaches it so it isn't rendered twice.
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 928
 929        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
 930            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
 931                return self.func("TO_GEOGRAPHY", expression.this)
 932            if expression.is_type(exp.DataType.Type.GEOMETRY):
 933                return self.func("TO_GEOMETRY", expression.this)
 934
 935            return super().cast_sql(expression, safe_prefix=safe_prefix)
 936
 937        def trycast_sql(self, expression: exp.TryCast) -> str:
 938            value = expression.this
 939
 940            if value.type is None:
 941                from sqlglot.optimizer.annotate_types import annotate_types
 942
 943                value = annotate_types(value)
 944
 945            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 946                return super().trycast_sql(expression)
 947
 948            # TRY_CAST only works for string values in Snowflake
 949            return self.cast_sql(expression)
 950
 951        def log_sql(self, expression: exp.Log) -> str:
 952            if not expression.expression:
 953                return self.func("LN", expression.this)
 954
 955            return super().log_sql(expression)
 956
 957        def unnest_sql(self, expression: exp.Unnest) -> str:
 958            unnest_alias = expression.args.get("alias")
 959            offset = expression.args.get("offset")
 960
 961            columns = [
 962                exp.to_identifier("seq"),
 963                exp.to_identifier("key"),
 964                exp.to_identifier("path"),
 965                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 966                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 967                or exp.to_identifier("value"),
 968                exp.to_identifier("this"),
 969            ]
 970
 971            if unnest_alias:
 972                unnest_alias.set("columns", columns)
 973            else:
 974                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 975
 976            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 977            alias = self.sql(unnest_alias)
 978            alias = f" AS {alias}" if alias else ""
 979            return f"{explode}{alias}"
 980
 981        def show_sql(self, expression: exp.Show) -> str:
 982            terse = "TERSE " if expression.args.get("terse") else ""
 983            history = " HISTORY" if expression.args.get("history") else ""
 984            like = self.sql(expression, "like")
 985            like = f" LIKE {like}" if like else ""
 986
 987            scope = self.sql(expression, "scope")
 988            scope = f" {scope}" if scope else ""
 989
 990            scope_kind = self.sql(expression, "scope_kind")
 991            if scope_kind:
 992                scope_kind = f" IN {scope_kind}"
 993
 994            starts_with = self.sql(expression, "starts_with")
 995            if starts_with:
 996                starts_with = f" STARTS WITH {starts_with}"
 997
 998            limit = self.sql(expression, "limit")
 999
1000            from_ = self.sql(expression, "from")
1001            if from_:
1002                from_ = f" FROM {from_}"
1003
1004            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1005
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")

            # To avoid generating all these default values, we set group to None if
            # it's 0 (also default value) which doesn't trigger the following chain
            if group and group.name == "0":
                group = None

            # REGEXP_SUBSTR's optional arguments are positional, so once a later
            # argument is present every earlier one must be emitted too; each line
            # below supplies a default only when a later argument exists.
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
1029
1030        def describe_sql(self, expression: exp.Describe) -> str:
1031            # Default to table if kind is unknown
1032            kind_value = expression.args.get("kind") or "TABLE"
1033            kind = f" {kind_value}" if kind_value else ""
1034            this = f" {self.sql(expression, 'this')}"
1035            expressions = self.expressions(expression, flat=True)
1036            expressions = f" {expressions}" if expressions else ""
1037            return f"DESCRIBE{kind}{this}{expressions}"
1038
1039        def generatedasidentitycolumnconstraint_sql(
1040            self, expression: exp.GeneratedAsIdentityColumnConstraint
1041        ) -> str:
1042            start = expression.args.get("start")
1043            start = f" START {start}" if start else ""
1044            increment = expression.args.get("increment")
1045            increment = f" INCREMENT {increment}" if increment else ""
1046            return f"AUTOINCREMENT{start}{increment}"
1047
1048        def cluster_sql(self, expression: exp.Cluster) -> str:
1049            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1050
1051        def struct_sql(self, expression: exp.Struct) -> str:
1052            keys = []
1053            values = []
1054
1055            for i, e in enumerate(expression.expressions):
1056                if isinstance(e, exp.PropertyEQ):
1057                    keys.append(
1058                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1059                    )
1060                    values.append(e.expression)
1061                else:
1062                    keys.append(exp.Literal.string(f"_{i}"))
1063                    values.append(e)
1064
1065            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1066
        @generator.unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            # Snowflake's APPROX_PERCENTILE takes only the value and the quantile;
            # `weight` and `accuracy` are reported as unsupported via the decorator.
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1070
1071        def alterset_sql(self, expression: exp.AlterSet) -> str:
1072            exprs = self.expressions(expression, flat=True)
1073            exprs = f" {exprs}" if exprs else ""
1074            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1075            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1076            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1077            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1078            tag = self.expressions(expression, key="tag", flat=True)
1079            tag = f" TAG {tag}" if tag else ""
1080
1081            return f"SET{exprs}{file_format}{copy_options}{tag}"
1082
1083        def strtotime_sql(self, expression: exp.StrToTime):
1084            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1085            return self.func(
1086                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1087            )

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, 
<class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.VarMap'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render CREATE-level properties unwrapped and space-separated.

    Snowflake does not use a WITH (...) wrapper around table properties.
    """
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Generate a VALUES clause.

    Falls back to the non-table form whenever the expression tree contains
    a construct from UNSUPPORTED_VALUES_EXPRESSIONS (maps/structs), which
    Snowflake cannot emit inside a tabular VALUES.
    """
    has_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing typed struct fields to a bare OBJECT.

    The correct Snowflake syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ],
    so a struct whose fields are raw DataType nodes (no key names) cannot be
    spelled out and is emitted as plain OBJECT instead.
    """
    fields = expression.expressions
    if fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        if any(isinstance(field_type, exp.DataType) for field_type in fields):
            return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Emit Snowflake's TO_NUMBER with its optional format/precision/scale args.

    `self.func` drops None arguments, so absent optional args simply vanish.
    """
    optional_args = (expression.args.get(key) for key in ("format", "precision", "scale"))
    return self.func("TO_NUMBER", expression.this, *optional_args)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS.

    Snowflake's variant takes nanoseconds rather than milliseconds, so a
    `milli` argument is popped and rewritten as `nano` (1 ms == 1_000_000 ns).
    """
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a CAST.

    Casts to geospatial types are spelled with Snowflake's conversion
    functions (TO_GEOGRAPHY / TO_GEOMETRY) instead of CAST syntax.
    """
    if expression.is_type(exp.DataType.Type.GEOGRAPHY):
        return self.func("TO_GEOGRAPHY", expression.this)
    if expression.is_type(exp.DataType.Type.GEOMETRY):
        return self.func("TO_GEOMETRY", expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, downgrading to CAST for non-string inputs.

    Snowflake's TRY_CAST only accepts string values, so anything that is
    provably not text (after lazy type annotation) becomes a plain CAST.
    """
    value = expression.this

    if value.type is None:
        # Import lazily so type inference is only paid for when needed.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; the single-argument form is natural log, spelled LN."""
    if expression.expression:
        return super().log_sql(expression)
    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN yields a fixed six-column row (seq, key, path, index, value,
    this), so the alias column list is padded out to match; an explicit
    offset expression replaces the "index" column, and the first caller
    alias column (if any) replaces "value".
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    value_column = (
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value")
    )
    index_column = (
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index")
    )

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_column,
        value_column,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias = self.sql(unnest_alias)
    return f"{explode} AS {alias}" if alias else explode
def show_sql(self, expression: exp.Show) -> str:
    """Render Snowflake's SHOW command with all of its optional clauses.

    Each optional clause is rendered as either an empty string or a
    leading-space-prefixed fragment, then concatenated in fixed order.
    """
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    if scope:
        scope = f" {scope}"

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    return (
        f"SHOW {terse}{expression.name}{history}{like}"
        f"{scope_kind}{scope}{starts_with}{limit}{from_}"
    )
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Transpile RegexpExtract to Snowflake's REGEXP_SUBSTR.

    REGEXP_SUBSTR's arguments are positional, so when a later argument is
    present every earlier optional argument must be materialized with its
    Snowflake default. The chained `or` below fills defaults right-to-left:
    a present `group` forces `parameters`, which forces `occurrence`, which
    forces `position`.
    """
    group = expression.args.get("group")

    # group == 0 is also Snowflake's default, so drop it to avoid
    # triggering the default-materialization chain unnecessarily.
    if group and group.name == "0":
        group = None

    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unspecified."""
    # The `or "TABLE"` default makes the kind always truthy, so it is
    # unconditionally included.
    kind = expression.args.get("kind") or "TABLE"
    this = self.sql(expression, "this")
    exprs = self.expressions(expression, flat=True)
    exprs = f" {exprs}" if exprs else ""
    return f"DESCRIBE {kind} {this}{exprs}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Identity columns are spelled AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Snowflake clustering keys require parentheses: CLUSTER BY (k1, k2, ...)."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct as OBJECT_CONSTRUCT(key1, value1, key2, value2, ...).

    Named fields (PropertyEQ) contribute their name as a string-literal key;
    unnamed fields get positional keys of the form "_<index>".
    """
    pairs = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            key = (
                exp.Literal.string(field.name)
                if isinstance(field.this, exp.Identifier)
                else field.this
            )
            pairs.extend((key, field.expression))
        else:
            pairs.extend((exp.Literal.string(f"_{i}"), field))

    return self.func("OBJECT_CONSTRUCT", *pairs)
@generator.unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Map ApproxQuantile to Snowflake's APPROX_PERCENTILE.

    The `weight` and `accuracy` arguments have no Snowflake equivalent and
    are reported as unsupported by the decorator.
    """
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with Snowflake's stage/tag option clauses.

    Produces: SET [<exprs>] [STAGE_FILE_FORMAT = (...)]
    [STAGE_COPY_OPTIONS = (...)] [TAG ...], each clause omitted when empty.
    """
    exprs = self.expressions(expression, flat=True)
    exprs = f" {exprs}" if exprs else ""

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""

    tag = self.expressions(expression, key="tag", flat=True)
    tag = f" TAG {tag}" if tag else ""

    return f"SET{exprs}{file_format}{copy_options}{tag}"
def strtotime_sql(self, expression: exp.StrToTime):
    """STR_TO_TIME becomes [TRY_]TO_TIMESTAMP(value, fmt) per the safe flag."""
    func_name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(func_name, expression.this, self.format_time(expression))
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
PARSE_JSON_NAME
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql