"""This is an educational implementation of the byte pair encoding algorithm."""
import collections
from typing import Optional

import regex

import tiktoken


class SimpleBytePairEncoding:
    def __init__(self, *, pat_str: str, mergeable_ranks: dict[bytes, int]) -> None:
        """Creates an Encoding object."""
        # A regex pattern string that is used to split the input text
        self.pat_str = pat_str
        # A dictionary mapping token bytes to their ranks. The ranks correspond to merge priority
        self.mergeable_ranks = mergeable_ranks

        self._decoder = {token: token_bytes for token_bytes, token in mergeable_ranks.items()}
        self._pat = regex.compile(pat_str)

    def encode(self, text: str, visualise: Optional[str] = "colour") -> list[int]:
        """Encodes a string into tokens.

        >>> enc.encode("hello world")
        [388, 372]
        """
        # Use the regex to split the text into (approximately) words
        words = self._pat.findall(text)
        tokens = []
        for word in words:
            # Turn each word into tokens, using the byte pair encoding algorithm
            word_bytes = word.encode("utf-8")
            word_tokens = bpe_encode(self.mergeable_ranks, word_bytes, visualise=visualise)
            tokens.extend(word_tokens)
        return tokens

    def decode_bytes(self, tokens: list[int]) -> bytes:
        """Decodes a list of tokens into bytes.

        >>> enc.decode_bytes([388, 372])
        b'hello world'
        """
        return b"".join(self._decoder[token] for token in tokens)

    def decode(self, tokens: list[int]) -> str:
        """Decodes a list of tokens into a string.

        Decoded bytes are not guaranteed to be valid UTF-8. In that case, we replace
        the invalid bytes with the replacement character "�".

        >>> enc.decode([388, 372])
        'hello world'
        """
        return self.decode_bytes(tokens).decode("utf-8", errors="replace")

    def decode_tokens_bytes(self, tokens: list[int]) -> list[bytes]:
        """Decodes a list of tokens into a list of bytes.

        Useful for visualising how a string is tokenised.

        >>> enc.decode_tokens_bytes([388, 372])
        [b'hello', b' world']
        """
        return [self._decoder[token] for token in tokens]

    @staticmethod
    def train(training_data: str, vocab_size: int, pat_str: str):
        """Train a BPE tokeniser on some data!"""
        mergeable_ranks = bpe_train(data=training_data, vocab_size=vocab_size, pat_str=pat_str)
        return SimpleBytePairEncoding(pat_str=pat_str, mergeable_ranks=mergeable_ranks)

    @staticmethod
    def from_tiktoken(encoding):
        if isinstance(encoding, str):
            encoding = tiktoken.get_encoding(encoding)
        return SimpleBytePairEncoding(
            pat_str=encoding._pat_str, mergeable_ranks=encoding._mergeable_ranks
        )


def bpe_encode(
    mergeable_ranks: dict[bytes, int], input: bytes, visualise: Optional[str] = "colour"
) -> list[int]:
    # Start with every individual byte as its own token
    parts = [bytes([b]) for b in input]
    while True:
        # See the intermediate merges play out!
        if visualise:
            if visualise in ["colour", "color"]:
                visualise_tokens(parts)
            elif visualise == "simple":
                print(parts)

        # Iterate over all pairs and find the pair we want to merge the most
        min_idx = None
        min_rank = None
        for i, pair in enumerate(zip(parts[:-1], parts[1:])):
            rank = mergeable_ranks.get(pair[0] + pair[1])
            if rank is not None and (min_rank is None or rank < min_rank):
                min_idx = i
                min_rank = rank

        # If there were no pairs we could merge, we're done!
        if min_rank is None:
            break
        assert min_idx is not None

        # Otherwise, merge that pair and leave the rest unchanged. Then repeat.
        parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2 :]

    if visualise:
        print()

    tokens = [mergeable_ranks[part] for part in parts]
    return tokens


def bpe_train(
    data: str, vocab_size: int, pat_str: str, visualise: Optional[str] = "colour"
) -> dict[bytes, int]:
    # First, add tokens for each individual byte value
    if vocab_size < 2**8:
        raise ValueError("vocab_size must be at least 256, so we can encode all bytes")
    ranks = {}
    for i in range(2**8):
        ranks[bytes([i])] = i

    # Splinter up our data into lists of bytes
    words: list[list[bytes]] = [
        [bytes([b]) for b in word.encode("utf-8")] for word in regex.findall(pat_str, data)
    ]

    # Now, use our data to figure out which merges we should make
    while len(ranks) < vocab_size:
        # Find the most common pair. This will become our next token
        stats = collections.Counter()
        for piece in words:
            for pair in zip(piece[:-1], piece[1:]):
                stats[pair] += 1

        most_common_pair = max(stats, key=lambda x: stats[x])
        token_bytes = most_common_pair[0] + most_common_pair[1]
        token = len(ranks)
        # Add the new token!
        ranks[token_bytes] = token

        # Now merge that most common pair in all the words, i.e. update our training data
        # to reflect our decision to make that pair into a new token
        new_words = []
        for word in words:
            new_word = []
            i = 0
            while i < len(word) - 1:
                if (word[i], word[i + 1]) == most_common_pair:
                    # We found our pair! Merge it
                    new_word.append(token_bytes)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            if i == len(word) - 1:
                new_word.append(word[i])
            new_words.append(new_word)
        words = new_words

        # See the intermediate merges play out!
        if visualise:
            print(f"The current most common pair is {most_common_pair[0]} + {most_common_pair[1]}")
            print(f"So we made {token_bytes} our {len(ranks)}th token")
            if visualise in ["colour", "color"]:
                print("Now the first fifty words in our training data look like:")
                visualise_tokens([token for word in words[:50] for token in word])
            elif visualise == "simple":
                print("Now the first twenty words in our training data look like:")
                for word in words[:20]:
                    print(word)
            print("\n")

    return ranks


def visualise_tokens(token_values: list[bytes]) -> None:
    background = [f"\u001b[48;5;{i}m" for i in [167, 179, 185, 77, 80, 68, 134]]
    # If token boundaries do not occur at unicode character boundaries, we use the unicode
    # replacement character to represent the partial character
    unicode_token_values = [x.decode("utf-8", errors="replace") for x in token_values]

    running_length = 0
    last_color = None
    for token in unicode_token_values:
        color = background[running_length % len(background)]
        # Make sure adjacent tokens never get the same background colour
        if color == last_color:
            color = background[(running_length + 1) % len(background)]
            assert color != last_color
        last_color = color
        running_length += len(token)
        print(color + token, end="")
    print("\u001b[0m")


def train_simple_encoding():
    gpt2_pattern = (
        r"""'s|'t|'re|'ve|'m|'ll|'d| ?[\p{L}]+| ?[\p{N}]+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
    )
    with open(__file__, "r") as f:
        data = f.read()

    enc = SimpleBytePairEncoding.train(data, vocab_size=600, pat_str=gpt2_pattern)

    print("This is the sequence of merges performed in order to encode 'hello world':")
    tokens = enc.encode("hello world")
    assert enc.decode(tokens) == "hello world"
    assert enc.decode_bytes(tokens) == b"hello world"
    assert enc.decode_tokens_bytes(tokens) == [b"hello", b" world"]

    return tokens
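

# --- Illustrative usage (not part of the original tiktoken module) ---
# A minimal sketch of how the pieces above fit together: train a small tokeniser on this
# file's own source text and watch the BPE merges for "hello world" play out in the terminal.
if __name__ == "__main__":
    train_simple_encoding()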