Better tokenizing code for AuraFlow.

This commit is contained in:
comfyanonymous
2024-07-12 01:08:45 -04:00
parent b6f09cf649
commit 29c2e26724
5 changed files with 25 additions and 1175 deletions

View File

@@ -0,0 +1,22 @@
import os
class LLAMATokenizer:
    """Minimal SentencePiece-backed tokenizer with a HuggingFace-like interface.

    Wraps a ``SentencePieceProcessor`` loaded from a model file and exposes
    ``from_pretrained``, ``get_vocab`` and callable encoding, appending the
    EOS id to every encoded sequence.
    """

    @staticmethod
    def from_pretrained(path):
        """Alternate constructor mirroring the HuggingFace naming convention."""
        return LLAMATokenizer(path)

    def __init__(self, tokenizer_path):
        # Local import: sentencepiece is only required when a tokenizer is
        # actually constructed, not when this module is imported.
        import sentencepiece
        self.tokenizer = sentencepiece.SentencePieceProcessor(model_file=tokenizer_path)
        self.end = self.tokenizer.eos_id()

    def get_vocab(self):
        """Return a dict mapping each piece string to its integer token id."""
        return {
            self.tokenizer.id_to_piece(idx): idx
            for idx in range(self.tokenizer.get_piece_size())
        }

    def __call__(self, string):
        """Encode *string* to token ids, with the EOS id appended.

        Returns a dict of the form ``{"input_ids": [...]}``.
        """
        token_ids = self.tokenizer.encode(string)
        return {"input_ids": token_ids + [self.end]}