import os import torch from typing import List, Optional, Union, Dict from sentencepiece import SentencePieceProcessor from transformers import PreTrainedTokenizer from transformers.utils import logging, PaddingStrategy from transformers.tokenization_utils_base import EncodedInput, BatchEncoding # The underlying tokenizer, which is the packaging of the SP model class SPTokenizer: def __init__(self, model_path: str): # reload tokenizer assert os.path.isfile(model_path), model_path #Load the SP model as the underlying model self.sp_model = SentencePieceProcessor(model_file=model_path) #Set the number of words, BOS EOS PAD ID attribute # PAD is replaced by UNK of the underlying model self.n_words: int = self.sp_model.vocab_size() self.bos_id: int = self.sp_model.bos_id() self.eos_id: int = self.sp_model.eos_id() self.pad_id: int = self.sp_model.unk_id() assert self.sp_model.vocab_size() == self.sp_model.get_piece_size() # Define special words special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] # Establish a mapping of special word text to ID self.special_tokens = {<!-- -->} # Establish a mapping of special word IDs to text self.index_special_tokens = {<!-- -->} for token in special_tokens: # Traverse special words and fill in these two mappings self.special_tokens[token] = self.n_words self.index_special_tokens[self.n_words] = token self.n_words + = 1 #Convert text fragment to word text array def tokenize(self, s: str): # `EncodeAsPieces` forwarded to the underlying model return self.sp_model.EncodeAsPieces(s) #Convert text fragment to word ID array def encode(self, s: str, bos: bool = False, eos: bool = False) -> List[int]: assert type(s) is str # Call the `encode` method of the underlying model t = self.sp_model.encode(s) # Based on the passed `bos` and `eos` flags #Determine whether to add BOS and EOS IDs ifbos: t = [self.bos_id] + t if eos: t = t + [self.eos_id] return t # Convert word ID array to text fragment def decode(self, t: List[int]) -> str: # Forwarded to the `decode` method of the underlying model return self.sp_model.decode(t) # Convert word text array to text fragment def decode_tokens(self, tokens: List[str]) -> str: text = self.sp_model.DecodePieces(tokens) return text # Convert word text to ID def convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ # If the word is in a special token, look up the ID from `special_tokens` if token in self.special_tokens: return self.special_tokens[token] # Otherwise forward to the `PieceToId` of the underlying model return self.sp_model.PieceToId(token) # Convert word ID to text def convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" # If the word is in a special tag, or is one of BOS, EOS, or PAD, return an empty string if index in self.index_special_tokens or index in [self.eos_id, self.bos_id, self.pad_id] or index < 0: return "" # Otherwise forward to `IdToPiece` of the underlying model return self.sp_model.IdToPiece(index) # Token separator used directly by users class ChatGLMTokenizer(PreTrainedTokenizer): # Define vocabulary name vocab_files_names = {<!-- -->"vocab_file": "tokenizer.model"} #Define model input parameter names model_input_names = ["input_ids", "attention_mask", "position_ids"] def __init__(self, vocab_file, padding_side="left", **kwargs): super().__init__(padding_side=padding_side, clean_up_tokenization_spaces=False, **kwargs) self.name = "GLMTokenizer" # Save the vocabulary path in the attribute # This file is placed together with the vocabulary itself, so the path is just the file name. self.vocab_file = vocab_file #Create the underlying tokenizer and pass in the vocabulary path self.tokenizer = SPTokenizer(vocab_file) # Define special words BOS, EOS, PAD # Create a mapping from word text to ID self.special_tokens = {<!-- --> "<bos>": self.tokenizer.bos_id, "<eos>": self.tokenizer.eos_id, "<pad>": self.tokenizer.pad_id } # Convert special word text to ID def get_command(self, token): # If the word is in the special characters of GLM tokenizer # Find `special_tokens` and return its ID if token in self.special_tokens: return self.special_tokens[token] # If the word is not among the special characters of the underlying SP tokenizer, an error will be reported assert token in self.tokenizer.special_tokens, f"{<!-- -->token} is not a special token for {<!-- -->self.name}" # Find the `special_tokens` of the underlying tokenizer and return its ID return self.tokenizer.special_tokens[token] # Return UNK word text @property def unk_token(self) -> str: return "<unk>" # Return the PAD word text (that is, UNK) @property def pad_token(self) -> str: return "<unk>" # Return PAD word ID @property def pad_token_id(self): return self.get_command("<pad>") # Return EOS word text @property def eos_token(self) -> str: return "</s>" # Return EOS word ID @property def eos_token_id(self): return self.get_command("<eos>") # Return the vocabulary size @property def vocab_size(self): return self.tokenizer.n_words # Get the word list, which is the mapping of word text to ID def get_vocab(self): """ Returns vocab as a dict """ # Traverse the IDs of all words, i.e. 0 to VocabSize-1] # Call its own `_convert_id_to_token` method to convert the ID into text # Create a mapping of word text to ID vocab = {<!-- -->self._convert_id_to_token(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab #Convert text fragment to word text array def _tokenize(self, text, **kwargs): # Forward to the `tokenize` method of the underlying tokenizer return self.tokenizer.tokenize(text) # Convert word text to ID def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ # Forward to the `convert_token_to_id` method of the underlying tokenizer return self.tokenizer.convert_token_to_id(token) # Convert word ID to text def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" # Forward to the `convert_id_to_token` method of the underlying tokenizer return self.tokenizer.convert_id_to_token(index) # Convert word text array to text fragment def convert_tokens_to_string(self, tokens: List[str]) -> str: # Forward to the `decode_tokens` method of the underlying tokenizer return self.tokenizer.decode_tokens(tokens) # Save word list def save_vocabulary(self, save_directory, filename_prefix=None): """ Save the vocabulary and special tokens file to a directory. Args: save_directory (`str`): The directory in which to save the vocabulary. filename_prefix (`str`, *optional*): An optional prefix to add to the named of the saved files. Returns: `Tuple(str)`: Paths to the files saved. """ if os.path.isdir(save_directory): # If the incoming path is a directory, then the file name is the previously defined default file name. # Concatenate the incoming path and file name as the save path vocab_file = os.path.join( save_directory, self.vocab_files_names["vocab_file"] ) else: # Otherwise, the saving path is the incoming path. vocab_file = save_directory # Read the vocabulary based on the vocabulary path in the attribute with open(self.vocab_file, 'rb') as fin: proto_str = fin.read() # Write the word list to the save path with open(vocab_file, "wb") as writer: writer.write(proto_str) # Return to save path return (vocab_file,) # Get the prefix word list, namely GMASK and SOP def get_prefix_tokens(self): prefix_tokens = [self.get_command("[gMASK]"), self.get_command("sop")] return prefix_tokens ''' Build compound questions based on current questions and historical questions and answers In [1]: tokenizer.build_prompt('Q3', [('Q1', 'A1'),('Q2', 'A2')]) Out[1]: '[Round 1]\ \ Question: Q1\ \ Answer: A1\ \ [Round 2]\ \ Question: Q2\ \ Answer: A2\ \ [Round 3]\ \ Question: Q3\ \ Answer:' ''' def build_prompt(self, query, history=None): if history is None: history = [] prompt = "" for i, (old_query, response) in enumerate(history): # Traverse each pair of historical questions and answers, and assemble the serial number, question and answer according to the template # and add it after the compound question prompt + = "[Round {}]\ \ Question: {}\ \ Answer: {}\ \ ".format(i + 1, old_query, response) # Assemble the current round and current question according to the template and add it to the back of the compound question prompt + = "[Round {}]\ \ Question: {}\ \ Answer: ".format(len(history) + 1, query) return prompt # Add special words to the word ID array def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ # Maybe prefix a word list and add it to the front of IDS0 prefix_tokens = self.get_prefix_tokens() token_ids_0 = prefix_tokens + token_ids_0 # If IDS1 exists, add it after IDS0 and add EOS if token_ids_1 is not None: token_ids_0 = token_ids_0 + token_ids_1 + [self.get_command("<eos>")] return token_ids_0 def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults assert self.padding_side == "left" # `encoded_inputs` is a dictionary, `input_ids` contains the input word ID array of the model # `attention_mask` is the mask array, `position_ids` is the position ID array # `required_input` is the input word ID array required_input = encoded_inputs[self.model_input_names[0]] # `seq_length` is the input length seq_length = len(required_input) # If the strategy is to fill by longest, since there is only one input, the maximum length is its length if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) # If maximum length and `pad_to_multiple_of` are provided # Set the maximum length to no less than a multiple of its `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of # If the policy is not no padding and the maximum length # If it is not equal to the input length, padding is required needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # If there is no mask, initialize to an array of all 1s and length SeqLen if "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * seq_length # If there is no position ID, initialize to [0, ..., SeqLen - 1] if "position_ids" not in encoded_inputs: encoded_inputs["position_ids"] = list(range(seq_length)) if needs_to_be_padded: # If padding is required, calculate the number of padding characters, which is the difference between the maximum length and the input difference = max_length - len(required_input) # If a mask exists, insert diff 0s in front of the mask if "attention_mask" in encoded_inputs: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] # If there is a location ID, insert diff 0s in front of it as well. if "position_ids" in encoded_inputs: encoded_inputs["position_ids"] = [0] * difference + encoded_inputs["position_ids"] # Insert diff PAD IDs before the input IDS encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input return encoded_inputs </code><img class="look-more-preCode contentImg-no-view" src="//i2.wp.com/csdnimg.cn/release/blogv2/dist/pc/img/newCodeMoreBlack.png" alt ="" title="">