import pygtrans
import time
import re

class TranslatePool:
    """Rate-limited wrapper around ``pygtrans.Translate``.

    Long texts are split into sentence-aligned chunks no larger than
    ``sentence_limit`` characters, and requests are throttled to at most
    ``frequency_limit`` requests per minute.
    """

    def __init__(self, proxies=None, frequency_limit=60, sentence_limit=2000):
        """Create a translation client.

        Args:
            proxies: Optional proxy mapping passed to ``pygtrans.Translate``.
            frequency_limit: Maximum number of requests per minute.
            sentence_limit: Maximum characters sent in a single request.
        """
        self.client = pygtrans.Translate(proxies=proxies)
        self.frequency_limit = frequency_limit  # max requests per minute
        self.sentence_limit = sentence_limit    # max chars per request
        # Monotonic timestamp of the last request; 0.0 means "never",
        # so the first request is never delayed.
        self.last_request_time = 0.0

    def split_sentences(self, text):
        """Split ``text`` into chunks of at most ``sentence_limit`` chars,
        breaking only at sentence-ending punctuation (CJK or Latin).

        Concatenating the returned chunks reproduces ``text`` exactly.
        A single sentence longer than the limit is kept whole — it cannot
        be split at a sentence boundary — so one chunk may still exceed
        the limit.
        """
        # Capturing split keeps the delimiters: parts alternate
        # [sentence, delimiter, sentence, delimiter, ...].
        parts = re.split(r'([。！？.?!])', text)
        chunks = []
        current = ""
        for i in range(0, len(parts), 2):
            sentence = parts[i] + (parts[i + 1] if i + 1 < len(parts) else "")
            # Only flush a non-empty chunk; flushing unconditionally emitted
            # a spurious empty chunk when the very first sentence already
            # exceeded the limit (original bug).
            if current and len(current) + len(sentence) > self.sentence_limit:
                chunks.append(current)
                current = sentence
            else:
                current += sentence
        if current:
            chunks.append(current)
        return chunks

    def ensure_frequency_limit(self):
        """Block just long enough to keep request rate under the limit."""
        # time.monotonic() is immune to wall-clock adjustments (NTP, DST),
        # unlike time.time(), so intervals are always measured correctly.
        min_interval = 60 / self.frequency_limit
        elapsed = time.monotonic() - self.last_request_time
        if elapsed < min_interval:
            time.sleep(min_interval - elapsed)
        self.last_request_time = time.monotonic()

    def translate_sync(self, text, target='zh'):
        """Translate ``text`` to ``target``, chunking long input.

        Returns:
            The concatenated translated text.
        """
        if len(text) <= self.sentence_limit:
            self.ensure_frequency_limit()
            return self.client.translate(text, target=target).translatedText
        translations = []
        for chunk in self.split_sentences(text):
            self.ensure_frequency_limit()
            result = self.client.translate(chunk, target=target)
            translations.append(result.translatedText)
        return ''.join(translations)

    def detect_language(self, text):
        """Return the language code pygtrans detects for ``text``."""
        return self.client.detect(text).language


if __name__ == "__main__":

    # Two English paper abstracts used as a long-text translation sample.
    sample_text = """
    Classical Chinese is a gateway to the rich heritage and wisdom of ancient
China, yet its complexities pose formidable comprehension barriers for most
modern people without specialized knowledge. While Large Language Models (LLMs)
have shown remarkable capabilities in Natural Language Processing (NLP), they
struggle with Classical Chinese Understanding (CCU), especially in
data-demanding and knowledge-intensive tasks. In response to this dilemma, we
propose TongGu (mean understanding ancient and modern), the first
CCU-specific LLM, underpinned by three core contributions. First, we construct
a two-stage instruction-tuning dataset ACCN-INS derived from rich classical
Chinese corpora, aiming to unlock the full CCU potential of LLMs. Second, we
propose Redundancy-Aware Tuning (RAT) to prevent catastrophic forgetting,
enabling TongGu to acquire new capabilities while preserving its foundational
knowledge. Third, we present a CCU Retrieval-Augmented Generation (CCU-RAG)
technique to reduce hallucinations based on knowledge-grounding. Extensive
experiments across 24 diverse CCU tasks validate TongGu's superior ability,
underscoring the effectiveness of RAT and CCU-RAG. The model and dataset will
be public available.
Automatic summarization of legal case judgements, which are known to be long
and complex, has traditionally been tried via extractive summarization models.
In recent years, generative models including abstractive summarization models
and Large language models (LLMs) have gained huge popularity. In this paper, we
explore the applicability of such models for legal case judgement
summarization. We applied various domain specific abstractive summarization
models and general domain LLMs as well as extractive summarization models over
two sets of legal case judgements from the United Kingdom (UK) Supreme Court
and the Indian (IN) Supreme Court and evaluated the quality of the generated
summaries. We also perform experiments on a third dataset of legal documents of
a different type, Government reports from the United States (US). Results show
that abstractive summarization models and LLMs generally perform better than
the extractive methods as per traditional metrics for evaluating summary
quality. However, detailed investigation shows the presence of inconsistencies
and hallucinations in the outputs of the generative models, and we explore ways
to reduce the hallucinations and inconsistencies in the summaries. Overall, the
investigation suggests that further improvements are needed to enhance the
reliability of abstractive models and LLMs for legal case judgement
summarization. At present, a human-in-the-loop technique is more suitable for
performing manual checks to identify inconsistencies in the generated
summaries.
    """

    # Route all requests through a local HTTP proxy.
    pool = TranslatePool(proxies={'https': 'http://localhost:7890'})

    # Translate the sample (longer than the chunk limit, so it will be
    # split into sentence-aligned chunks) and print the combined result.
    print(pool.translate_sync(sample_text))

