import pandas as pd
import re
import jieba
from gensim.models import KeyedVectors
from tqdm import tqdm  # 引入 tqdm
import json


# Parse the forbidden-words export: each line of the txt file holds one JSON object.
data_list = []

with open(r"C:\Users\zlsjJSSA\Desktop\获得违禁词20240703.txt", 'r', encoding='utf-8') as file:
    for raw_line in file:
        # Malformed lines are reported and skipped; well-formed ones are collected.
        try:
            record = json.loads(raw_line.strip())
        except json.JSONDecodeError as e:
            print(f"Error decoding JSON: {e}")
            continue

        payload = record['data']
        data_list.append({
            'text_list': payload['text_list'],
            'vio_text': payload['vio_text'],
            'image_url': payload['image_url'],
        })

# Dump all parsed records to an Excel workbook.
df = pd.DataFrame(data_list)
df.to_excel(r"C:\Users\zlsjJSSA\Desktop\违禁词获得.xlsx", index=False)

# 读取Excel文件
file_path = r"C:\Users\zlsjJSSA\Desktop\违禁词获得.xlsx"
df = pd.read_excel(file_path)

# 提取第一列数据，假设第一列的列名为'text_list'
first_column = df['text_list']

# Matches one or more consecutive CJK Unified Ideographs (basic block only).
# Compiled once at module level instead of on every row — the original
# recompiled/looked up the pattern inside the function for each cell.
_CHINESE_RE = re.compile(r'[\u4e00-\u9fa5]+')


def extract_chinese(text):
    """Strip *text* down to its Chinese characters.

    Parameters
    ----------
    text : Any
        Cell value; non-string values (e.g. NaN from pandas) are returned
        unchanged so the column's missing values survive cleaning.

    Returns
    -------
    str or Any
        The concatenation of all Chinese-character runs when *text* is a
        ``str``, otherwise *text* itself.
    """
    if not isinstance(text, str):
        return text
    return ''.join(_CHINESE_RE.findall(text))

# Clean the text column in place (keep Chinese characters only) and persist
# the workbook back to disk.
df['text_list'] = first_column.apply(extract_chinese)

output_file_path = r"C:\Users\zlsjJSSA\Desktop\违禁词获得.xlsx"
df.to_excel(output_file_path, index=False)
print(f"Cleaned data saved to {output_file_path}")
# Load the pretrained Chinese Word2Vec embeddings (text format); the model
# can be downloaded from https://ai.tencent.com/ailab/nlp/en/download.html
model_path = r"C:\Users\zlsjJSSA\Downloads\tencent-ailab-embedding-zh-d100-v0.2.0-s\tencent-ailab-embedding-zh-d100-v0.2.0-s\tencent-ailab-embedding-zh-d100-v0.2.0-s.txt"
word_vectors = KeyedVectors.load_word2vec_format(model_path, binary=False)

# Input workbooks: the curated term dataset and the cleaned text list.
dataset_file = r"C:\Users\zlsjJSSA\Desktop\数据集.xlsx"
text_list_file = r"C:\Users\zlsjJSSA\Desktop\违禁词获得.xlsx"
dataset = pd.read_excel(dataset_file)
text_list_data = pd.read_excel(text_list_file)

# Force the text column to str so substring matching never hits NaN/floats.
text_list_data['text_list'] = text_list_data['text_list'].astype(str)

# The first column of the dataset holds the special terms to search for.
special_terms = dataset.iloc[:, 0].tolist()

def find_special_terms(text, terms=None):
    """Return the special terms that occur in *text* as a substring.

    Parameters
    ----------
    text : str
        Text to scan.
    terms : list of str, optional
        Terms to search for. Defaults to the module-level ``special_terms``
        list loaded from the dataset workbook (kept as the default so all
        existing call sites behave exactly as before).

    Returns
    -------
    str
        The matched terms joined with ``', '``, or ``'无'`` ("none") when
        nothing matches.
    """
    if terms is None:
        terms = special_terms
    found_terms = [term for term in terms if term in text]
    return ', '.join(found_terms) if found_terms else '无'

# Step 1: tag every text row with the special terms it contains.
tqdm.pandas(desc="步骤一进度")  # progress bar for the row-wise apply
text_list_data['Found Special Terms'] = text_list_data['text_list'].progress_apply(find_special_terms)

# Persist the step-1 result.
output_step1_file = r"C:\Users\zlsjJSSA\Desktop\步骤一.xlsx"
text_list_data.to_excel(output_step1_file, index=False)
print(f"Updated text_list with special terms saved to '{output_step1_file}'")

# Inputs for the similarity pass: the texts (column 0) and the vocabulary
# of comparison words (column 0 of the dataset).
texts_to_compare = text_list_data.iloc[:, 0].tolist()
comparison_words = dataset.iloc[:, 0].tolist()

# Pure-Chinese token check, compiled once; `fullmatch` replaces the anchored
# `^…$` match (and, unlike `$`, does not accept a trailing newline).
_PURE_CHINESE_RE = re.compile(r'[\u4e00-\u9fa5]+')
# Generic promo words excluded from similarity search. A frozenset gives O(1)
# membership tests; the original rebuilt and linearly scanned a list literal
# for every token.
_EXCLUDED_WORDS = frozenset(["恭喜", "获得", "视频", "礼包", "会员", "飞起", "钻石", "左下角"])


def filter_words(words):
    """Keep only all-Chinese tokens that are not generic promo stopwords.

    Parameters
    ----------
    words : list of str
        Tokens (e.g. jieba segmentation output).

    Returns
    -------
    list of str
        Tokens consisting solely of Chinese characters and not present in
        the stopword set, in their original order.
    """
    return [w for w in words
            if _PURE_CHINESE_RE.fullmatch(w) and w not in _EXCLUDED_WORDS]

def calculate_highest_similarity(text, comparison_words):
    """Tokenize *text* and find its best embedding match in *comparison_words*.

    Scores every (text token, comparison word) pair with the module-level
    ``word_vectors`` model; pairs where either word is out of vocabulary are
    skipped.

    Returns
    -------
    tuple
        ``(max_similarity, word_from_text, word_from_comparison)``; the two
        words are ``None`` when no pair scores above zero.
    """
    best_score = 0
    best_pair = (None, None)
    # jieba segmentation, then drop non-Chinese tokens and promo stopwords.
    candidates = filter_words(jieba.lcut(text))
    for token in candidates:
        for target in comparison_words:
            try:
                score = word_vectors.similarity(token, target)
            except KeyError:
                # One of the words is missing from the embedding vocabulary.
                continue
            if score > best_score:
                best_score = score
                best_pair = (token, target)
    return best_score, best_pair[0], best_pair[1]

# Step 2: label every text either by an exact special-term hit or by its
# highest embedding similarity.
# NOTE: DataFrame.append() was removed in pandas 2.0 (the original crashed
# there); rows are collected in a plain list and turned into a DataFrame once
# at the end, which is also linear instead of quadratic.
result_columns = ['文本', '最高相似度', '最高相似度文本词', '最高相似度比较词', '相似度标签']
result_rows = []

tqdm.pandas(desc="步骤二进度")  # kept for parity with the original script
for text in tqdm(texts_to_compare, desc="处理文本进度"):
    found_special_terms = find_special_terms(text)
    if found_special_terms != '无':
        # Exact hit: similarity is 1 by definition; record the first matched term.
        first_term = found_special_terms.split(',')[0]
        result_rows.append({
            '文本': text,
            '最高相似度': 1,
            '最高相似度文本词': first_term,
            '最高相似度比较词': first_term,
            '相似度标签': 1,
        })
    else:
        # No exact hit: fall back to embedding similarity; label 1 at >= 0.7.
        similarity_score, word_from_text, word_from_comparison = calculate_highest_similarity(text, comparison_words)
        result_rows.append({
            '文本': text,
            '最高相似度': similarity_score,
            '最高相似度文本词': word_from_text if word_from_text else '无',
            '最高相似度比较词': word_from_comparison if word_from_comparison else '无',
            '相似度标签': 1 if similarity_score >= 0.7 else 0,
        })

result_df = pd.DataFrame(result_rows, columns=result_columns)

# Promote text words whose best similarity clears 0.9 into the term dataset
# (threshold adjustable), dropping duplicates afterwards.
# NOTE: DataFrame.append() was removed in pandas 2.0 -> use pd.concat.
high_similarity_terms = result_df[result_df['最高相似度'] >= 0.9]['最高相似度文本词'].tolist()
dataset['专有名词'] = dataset['专有名词'].astype(str).str.strip()  # normalize whitespace
# sorted() makes the appended order deterministic (bare set iteration is not).
new_terms = pd.DataFrame(sorted(set(high_similarity_terms)), columns=['专有名词'])
dataset = pd.concat([dataset, new_terms], ignore_index=True)
dataset.drop_duplicates(subset=['专有名词'], keep='first', inplace=True)

# Save the updated term dataset.
output_updated_dataset_file = r"C:\Users\zlsjJSSA\Desktop\数据集.xlsx"
dataset.to_excel(output_updated_dataset_file, index=False)
print(f"Updated dataset saved to '{output_updated_dataset_file}'")

# Save the per-text scoring results.
output_step2_file = r"C:\Users\zlsjJSSA\Desktop\步骤二.xlsx"
result_df.to_excel(output_step2_file, index=False)
print(f"Processed data saved to '{output_step2_file}'")
