from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.models import Model
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline as trans_pipeline
import spacy
from spacy.tokens import Span


# Helper: extract and extend the relevant noun phrases from English text.
def extract_relevant_noun_phrases(nlp, text):
    """Return the texts of extended, de-duplicated noun phrases in *text*.

    Each base noun chunk found by *nlp* is grown rightwards to absorb
    relative clauses ("who is sitting") and prepositional modifiers
    ("in black"), filtered for relevance, and then reduced so that no
    returned span is nested inside another.

    Args:
        nlp: a loaded spaCy language pipeline.
        text: the (English) sentence to analyse.

    Returns:
        list[str]: phrase texts, longest spans first.
    """
    doc = nlp(text)

    relative_deps = {'relcl', 'acl'}
    wh_tags = {'WDT', 'WP', 'WRB'}
    clause_deps = {'aux', 'advmod', 'acomp', 'xcomp', 'ccomp', 'prep', 'pobj', 'conj', 'dobj'}
    clause_pos = {'VERB', 'ADP', 'ADJ', 'ADV'}
    prep_tail_deps = {'pobj', 'amod', 'advmod', 'conj'}
    banned_heads = {'robot', 'assistant', 'robots', 'assistants'}
    temporal_ents = {'TIME', 'DATE'}

    def _grow(span):
        # Push the right boundary past relative clauses and prepositional phrases.
        stop = span.end
        while stop < len(doc):
            tok = doc[stop]
            if tok.dep_ in relative_deps or tok.tag_ in wh_tags:
                # Relative clause: swallow the whole modifying clause.
                stop += 1
                while stop < len(doc) and (doc[stop].dep_ in clause_deps or doc[stop].pos_ in clause_pos):
                    stop += 1
            elif tok.dep_ == 'prep':
                # Prepositional phrase such as "in black".
                stop += 1
                while stop < len(doc) and doc[stop].dep_ in prep_tail_deps:
                    stop += 1
            else:
                # Neither pattern applies: extension is complete.
                break
        return doc[span.start:stop]

    def _keep(span):
        # Reject bare pronouns/determiners, addressee words, temporal
        # entities, and chunks that dangle on a preposition.
        if len(span) == 1 and span[0].pos_ in ('PRON', 'DET'):
            return False
        if span[0].text.lower() in banned_heads:
            return False
        if any(tok.ent_type_ in temporal_ents for tok in span):
            return False
        return span[-1].dep_ != 'prep'

    grown = [_grow(chunk) for chunk in doc.noun_chunks if _keep(chunk)]

    # Longest-first pass: keep a span only if it is not contained in one
    # that was already kept.
    kept = []
    for span in sorted(grown, key=len, reverse=True):
        if all(span.start < other.start or span.end > other.end for other in kept):
            kept.append(span)

    return [span.text for span in kept]


# Wrapper class bundling the speech-recognition, translation, and parsing models.
class AudioTransfer:
    """Pipeline: speech file -> Chinese text -> English text -> noun phrases.

    The three heavy models (ASR, translation, spaCy) are cached as class
    attributes so they are loaded only once, regardless of how many
    instances are created.
    """

    # Default model locations and device; pass overrides to the
    # initialize_* methods if a deployment uses different paths.
    DEFAULT_ASR_MODEL_PATH = '/root/paraformer'
    DEFAULT_TRANSLATION_MODEL_PATH = '/root/opus-mt-zhen/'
    DEFAULT_TRANSLATION_DEVICE = 4  # NOTE(review): pins CUDA device 4 — confirm on the deployment host
    DEFAULT_SPACY_MODEL = 'en_core_web_sm'

    # Class-level caches holding the initialized models.
    speech_recognition_model = None
    translation_model = None
    nlp = None

    def __init__(self):
        # Populate the class-level caches on first instantiation only.
        if AudioTransfer.speech_recognition_model is None:
            AudioTransfer.speech_recognition_model = self.initialize_speech_recognition_model()
        if AudioTransfer.translation_model is None:
            AudioTransfer.translation_model = self.initialize_translation_model()
        if AudioTransfer.nlp is None:
            AudioTransfer.nlp = self.initialize_tokenization_model()

        # Instance attributes alias the shared class-level models.
        self.speech_recognition_model = AudioTransfer.speech_recognition_model
        self.translation_model = AudioTransfer.translation_model
        self.nlp = AudioTransfer.nlp

    def initialize_speech_recognition_model(self, model_path=DEFAULT_ASR_MODEL_PATH):
        """Load the ModelScope Paraformer ASR model and wrap it in a pipeline.

        Args:
            model_path: local path of the pretrained ASR model.

        Returns:
            A ModelScope auto-speech-recognition pipeline.
        """
        model = Model.from_pretrained(model_path, disable_update=True, update_model=False, revision="v2.0.4")
        inference_pipeline = pipeline(task=Tasks.auto_speech_recognition, disable_update=True, update_model=False, model=model)
        return inference_pipeline

    def initialize_translation_model(self, model_path=DEFAULT_TRANSLATION_MODEL_PATH,
                                     device=DEFAULT_TRANSLATION_DEVICE):
        """Load the zh->en translation model/tokenizer and build a pipeline.

        Args:
            model_path: local path of the pretrained seq2seq model.
            device: device index passed to the transformers pipeline.

        Returns:
            A HuggingFace translation pipeline.
        """
        model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        translation_pipeline = trans_pipeline("translation_zh_to_en", model=model, tokenizer=tokenizer, device=device)
        return translation_pipeline

    def initialize_tokenization_model(self, model_name=DEFAULT_SPACY_MODEL):
        """Load the spaCy English pipeline used for noun-phrase extraction."""
        return spacy.load(model_name)

    def predict(self, file):
        """Run the full pipeline on an audio file; return a comma-joined phrase string."""
        # Speech recognition (Chinese text).
        recognized_text = self.recognize_speech(file)
        print(f"Recognized Speech: {recognized_text}")

        # Translation to English.
        translated_text = self.translate_text(recognized_text)
        print(f"Translated Text: {translated_text}")

        # Noun-phrase extraction on the English text.
        tokenized_text = self.tokenize_text(translated_text)

        return tokenized_text

    def recognize_speech(self, file):
        """Transcribe *file* with the ASR pipeline and return the text field."""
        result = self.speech_recognition_model(file)
        # assumes the ASR pipeline returns a list whose first item carries a
        # 'text' key — TODO confirm against the installed modelscope version
        recognized_text = result[0].get('text')
        return recognized_text

    def translate_text(self, text):
        """Translate Chinese *text* to English (truncated at 400 tokens)."""
        translated_text = self.translation_model(text, max_length=400)[0]['translation_text']
        return translated_text

    def tokenize_text(self, text):
        """Extract relevant noun phrases from *text* and join them with commas."""
        relevant_phrases = extract_relevant_noun_phrases(self.nlp, text)
        relevant_phrases_str = ','.join(relevant_phrases)
        return relevant_phrases_str

# audio_transfer = AudioTransfer()
#
# result = audio_transfer.translate_text('请帮我找到正在坐着的人')
# tokenized_text = audio_transfer.tokenize_text(result)
# print(result)
# print(tokenized_text)