import re
import nltk
from difflib import Differ
from icecream import ic
from app.webui.patch import model_load, num_tokens_in_string, one_chunk_initial_translation, one_chunk_reflect_on_translation, one_chunk_improve_translation
from app.webui.patch import calculate_chunk_size, multichunk_initial_translation, multichunk_reflect_on_translation, multichunk_improve_translation
from llama_index.core.node_parser import SentenceSplitter
nltk.download('punkt', quiet=True)

def tokenize(text):
    # Tokenize the text with nltk's word tokenizer
    words = nltk.word_tokenize(text)

    # Check if the text contains spaces
    if ' ' in text:
        # Interleave the words with the spaces that separate them
        tokens = []
        for word in words:
            tokens.append(word)
            # Avoid adding a space after apostrophe tokens such as contractions
            if not word.startswith("'") and not word.endswith("'"):
                tokens.append(' ')  # Add a space after each word
        return tokens[:-1]  # Drop the trailing space
    else:
        return words
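
# Illustrative example (assuming standard punkt tokenization):
# tokenize("Hello, world") returns ['Hello', ' ', ',', ' ', 'world'],
# so the diff below operates on words and the spaces between them
# rather than on raw characters.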

def diff_texts(text1, text2):
    tokens1 = tokenize(text1)
    tokens2 = tokenize(text2)

    d = Differ()
    diff_result = list(d.compare(tokens1, tokens2))

    highlighted_text = []
    for token in diff_result:
        word = token[2:]
        category = None
        if token[0] == '+':
            category = 'added'
        elif token[0] == '-':
            category = 'removed'
        elif token[0] == '?':
            continue  # Skip Differ's '?' hint lines
        highlighted_text.append((word, category))

    return highlighted_text
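
# Illustrative example: diff_texts("a cat", "a dog") produces
# [('a', None), (' ', None), ('cat', 'removed'), ('dog', 'added')],
# i.e. (token, category) pairs that a highlighting widget
# (e.g. Gradio's HighlightedText) can render directly.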

# Modified from src.translation-agent.utils.translate
def translator(
    source_lang,
    target_lang,
    source_text,
    country,
    max_tokens=1000,
):
    """Translate source_text from source_lang to target_lang."""

    num_tokens_in_text = num_tokens_in_string(source_text)
    ic(num_tokens_in_text)

    if num_tokens_in_text < max_tokens:
        ic("Translating text as a single chunk")

        # Note: use `yield from B()` here if yield statements are added inside B()
        init_translation = one_chunk_initial_translation(
            source_lang, target_lang, source_text
        )

        reflection = one_chunk_reflect_on_translation(
            source_lang, target_lang, source_text, init_translation, country
        )

        final_translation = one_chunk_improve_translation(
            source_lang, target_lang, source_text, init_translation, reflection
        )

        return init_translation, reflection, final_translation

    else:
        ic("Translating text as multiple chunks")

        token_size = calculate_chunk_size(
            token_count=num_tokens_in_text, token_limit=max_tokens
        )
        ic(token_size)

        # Split the source text into sentence-aligned chunks
        text_parser = SentenceSplitter(
            chunk_size=token_size,
        )
        source_text_chunks = text_parser.split_text(source_text)

        translation_1_chunks = multichunk_initial_translation(
            source_lang, target_lang, source_text_chunks
        )
        init_translation = "".join(translation_1_chunks)

        reflection_chunks = multichunk_reflect_on_translation(
            source_lang,
            target_lang,
            source_text_chunks,
            translation_1_chunks,
            country,
        )
        reflection = "".join(reflection_chunks)

        translation_2_chunks = multichunk_improve_translation(
            source_lang,
            target_lang,
            source_text_chunks,
            translation_1_chunks,
            reflection_chunks,
        )
        final_translation = "".join(translation_2_chunks)

        return init_translation, reflection, final_translation
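
# Minimal usage sketch (an illustration, not part of the original module):
# model_load() from app.webui.patch is assumed to configure the LLM
# backend before translator() is called; its exact arguments depend on
# your setup and are not shown here.
if __name__ == "__main__":
    sample_text = "The quick brown fox jumps over the lazy dog."
    init, reflection, final = translator(
        source_lang="English",
        target_lang="Spanish",
        source_text=sample_text,
        country="Mexico",
    )
    ic(init, reflection, final)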