"""SQL Lexer"""

from io import TextIOBase

from sqlparse import tokens
from sqlparse.keywords import SQL_REGEX
from sqlparse.utils import consume


class Lexer:
    """Lexer
    Empty class. Leaving for backwards-compatibility
    """

    @staticmethod
    def get_tokens(text, encoding=None):
        """
        Return an iterable of (tokentype, value) pairs generated from
        `text`. If `unfiltered` is set to `True`, the filtering mechanism
        is bypassed even if filters are defined.

        Also preprocess the text, i.e. expand tabs and strip it if
        wanted and applies registered filters.

        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        if isinstance(text, TextIOBase):
            text = text.read()

        if isinstance(text, str):
            pass
        elif isinstance(text, bytes):
            if encoding:
                text = text.decode(encoding)
            else:
                try:
                    text = text.decode('utf-8')
                except UnicodeDecodeError:
                    text = text.decode('unicode-escape')
        else:
            raise TypeError("Expected text or file-like object, "
                            "got {!r}".format(type(text)))

        iterable = enumerate(text)
        for pos, char in iterable:
            for rexmatch, action in SQL_REGEX:
                m = rexmatch(text, pos)

                if not m:
                    continue
                elif isinstance(action, tokens._TokenType):
                    yield action, m.group()
                elif callable(action):
                    yield action(m.group())

                consume(iterable, m.end() - pos - 1)
                break
            else:
                yield tokens.Error, char


def tokenize(sql, encoding=None):
    """Tokenize sql.

    Tokenize *sql* using the :class:`Lexer` and return a 2-tuple stream
    of ``(token type, value)`` items.
    """
    return Lexer.get_tokens(sql, encoding)
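

# Hedged example of the module-level helper (assumes this module is importable
# as ``sqlparse.lexer``, per the package layout). The printed token types are
# indicative:
#
#     >>> from sqlparse.lexer import tokenize
#     >>> for ttype, value in tokenize('SELECT 1'):
#     ...     print(ttype, repr(value))
#     Token.Keyword.DML 'SELECT'
#     Token.Text.Whitespace ' '
#     Token.Literal.Number.Integer '1'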