import os
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk import pos_tag
import nltk


from transformers import pipeline  # NLP text-generation model
import logging
import json

# Accumulates one subtitle_dict per processed SRT file; dumped to JSON at the end.
all_subtitle_dicts = []

# Load the GPT-2 text-generation pipeline (device=0 selects the first GPU).
text_generator = pipeline("text-generation", model="gpt2", device=0)  # 使用 GPU

# Silence transformers' warnings (e.g. "Setting `pad_token_id` to `eos_token_id`:50256
# for open-end generation") to reduce console noise.
logging.getLogger("transformers").setLevel(logging.ERROR)

# Fetch the NLTK data required by word_tokenize, stopwords, and pos_tag below.
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')

# English stopwords extended with spoken-subtitle filler words. Entries such as
# 'im', 'thats', 'dont' match the apostrophe-stripped forms produced by
# preprocess_text (punctuation is removed before tokenizing).
custom_stopwords = set(stopwords.words('english')).union({
    'like', 'oh', 'okay', 'guys', 'right', 'im', 'thats', 'really', 'thing', 'know',
    'well', 'get', 'make', 'going', 'yeah', 'god', 'little', 'dont', 'lot', 'look', 'looks', 'feels'
})


def preprocess_text(text):
    """Normalize subtitle text for keyword extraction.

    Strips punctuation, lowercases, tokenizes, keeps only nouns (NN, NNS)
    and adjectives (JJ), then drops custom stopwords and tokens of two
    characters or fewer. Returns the surviving tokens space-joined.
    """
    # Remove punctuation (including apostrophes, so "don't" -> "dont")
    # and normalize case before tokenizing.
    cleaned = re.sub(r'[^\w\s]', '', text).lower()
    tokens = word_tokenize(cleaned)

    kept = []
    for token, tag in pos_tag(tokens):
        # POS filter: singular/plural nouns and adjectives only.
        if tag not in ('NN', 'NNS', 'JJ'):
            continue
        # Stopword + length filter.
        if token in custom_stopwords or len(token) <= 2:
            continue
        kept.append(token)

    return ' '.join(kept)

def extract_keywords(text, top_n=10):
    """Return the top_n TF-IDF-scored terms (unigrams and bigrams) of *text*.

    The whole input is treated as a single document; ties are broken
    toward the later feature index, matching reversed-argsort ordering.
    """
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), max_df=1.0, min_df=0.01)
    scores = vectorizer.fit_transform([text]).toarray()[0]
    feature_names = vectorizer.get_feature_names_out()

    # Rank feature indices by descending score (descending index on ties —
    # identical to argsort()[-top_n:][::-1] on a stable ascending sort).
    ranked = sorted(range(len(scores)), key=lambda i: (scores[i], i), reverse=True)
    return [feature_names[i] for i in ranked[:top_n]]


def get_output_text(text):
    """Generate a sales pitch from extracted keywords via the GPT-2 pipeline.

    Args:
        text: the keyword list; its repr is embedded in the prompt.

    Returns:
        The generated continuation after the prompt's closing ']', or the
        fallback string "生成失败" when generation fails.
    """
    try:
        generated = text_generator(
            f"Convert those words and phrases into sales pitch: {text}",
            max_new_tokens=50,  # bound only the newly generated tokens
            num_return_sequences=1,
            truncation=True  # explicitly enable prompt truncation
        )[0]['generated_text']
        # The prompt embeds the keyword list's repr, so everything after the
        # first ']' is the model's continuation. Guard against a missing ']'.
        # BUGFIX: the old code split AFTER the except block, so a generation
        # failure always raised IndexError ("生成失败" contains no ']').
        parts = generated.split("]", 1)
        output_text = parts[1] if len(parts) > 1 else generated
    except Exception as e:
        print(f"模型生成失败: {e}")
        output_text = "生成失败"  # default output on failure

    return output_text


input_dir = r'D:\subtitle\bring_goods\2'

# Process every SRT file in the directory: clean it into plain text,
# extract keywords, generate a pitch, and collect one record per file.
for file_name in os.listdir(input_dir):
    if not file_name.endswith('.srt'):  # only handle SRT files
        continue
    file_path = os.path.join(input_dir, file_name)

    # Derive a title from names shaped like "[id]Title[extra]#tag.srt".
    # BUGFIX: the old split(']')[1] raised IndexError for names without ']';
    # fall back to the bare stem so one odd file cannot abort the batch.
    if ']' in file_name:
        file_title = file_name.split(']')[1].split('[')[0].split('#')[0]
    else:
        file_title = os.path.splitext(file_name)[0].split('[')[0].split('#')[0]

    # Read the SRT, dropping timestamps, blank lines, [Music] cues,
    # and leading cue numbers.
    with open(file_path, 'r', encoding='utf-8') as f:
        content = []
        for line in f:
            if '-->' not in line and line.strip() != '' and '[Music]' not in line:
                line = re.sub(r'^\d+\s+', '', line)  # strip leading cue number
                content.append(line.strip())

    # Join once; keep the string in memory (the old code wrote the txt and
    # then immediately re-read the identical bytes from disk).
    content = ' '.join(content)

    # Persist the cleaned transcript alongside the SRT for inspection.
    with open(os.path.join(input_dir, file_title + '.txt'), 'w', encoding='utf-8') as f_txt:
        f_txt.write(content)

    # Preprocess, extract keywords, and generate the pitch.
    preprocessed_text = preprocess_text(content)
    product_features = extract_keywords(preprocessed_text, top_n=15)
    output_text = get_output_text(product_features)

    subtitle_dict = {
      "instruction": f"[{file_title}]",
      "input": f"{product_features}",
      "output": output_text
    }
    print(subtitle_dict)
    all_subtitle_dicts.append(subtitle_dict)

# Save every collected record to one JSON file.
json_file_path = os.path.join(input_dir, 'bring_goods.json')
with open(json_file_path, 'w', encoding='utf-8') as json_file:
    json.dump(all_subtitle_dicts, json_file, ensure_ascii=False, indent=4)

print(f"所有字幕已保存至 {json_file_path}")

# Example of the intended training-record shape:
# {
#   "instruction": "Generate a sales script for [Product] targeting American audiences",
#   "input": "Product: Wireless Earbuds\nKey Features: 24hr battery, noise-cancelling, sweatproof",
#   "output": "Yo folks! Check these lit buds - 24hr playtime means they'll outlast your longest workout...   "
# }
