import pandas as pd
import json
import nltk
import jieba
import re
from gensim.models import KeyedVectors
import mmap
import os
import tempfile
import time
from tqdm import tqdm
"""""


这是第一种基于词向量相似度比较方法进行广告风控的方法，其中词向量模型为训练好的具有45000个中文词的模型，可以替换其他模型（同时给出了自己训练的模型）。


"""""
threshold_value=0.88 #此阈值可以由阈值计算程序得出，也可以自行选择
# 记录开始时间
start_time = time.time()
# 读取txt文件路径
txt_file_path = r"D:\实习\获得违禁词20240703.txt"

# 读取txt文件，并解析每行JSON数据
data_list = []
with open(txt_file_path, 'r', encoding='utf-8') as file:
    for line in file:
        data = json.loads(line.strip())
        data_list.append(data['data'])

# 创建DataFrame
df = pd.DataFrame(data_list)

# 打印DataFrame的列名以检查是否包含所需的列
print("DataFrame columns:", df.columns)

# 选择需要保存的列
try:
    df_filtered = df[['text_list', 'vio_text', 'image_url']]
except KeyError as e:
    print(f"Error: {e}")
    print("Available columns:", df.columns)

# 将数据保存到Excel文件
excel_filename = 'output_data.xlsx'


print(f"数据已保存到 {excel_filename}")


# Read the intermediate Excel file produced by the export step above.
file_path = 'output_data.xlsx'
df = pd.read_excel(file_path)


# 'text_list' holds the ad text to be cleaned before similarity scoring.
first_column = df['text_list']


# 定义只保留中文字符的函数
def extract_chinese(text):
    """Keep only the Chinese characters (U+4E00..U+9FA5) of *text*.

    Non-string inputs (e.g. NaN from pandas) are returned unchanged so the
    function can be applied safely to a whole column.
    """
    if not isinstance(text, str):
        return text
    return ''.join(re.findall(r'[\u4e00-\u9fa5]+', text))


# 对第一列数据应用数据清洗函数
cleaned_first_column = first_column.apply(extract_chinese)

# 将清洗后的第一列数据替换回原DataFrame
df['text_list'] = cleaned_first_column

# 将清洗后的数据保存到新的Excel文件
output_file_path = "cleaned_data.xlsx"
df.to_excel(output_file_path, index=False)

print(f"Cleaned data saved to {output_file_path}")






print(f"下面对数据进行分类，请耐心等待！")

# Load the pre-trained Chinese Word2Vec model (text word2vec format).


model_path = r"D:\实习\word2vec_model2.txt"

# Load the model file through a memory map.
# NOTE(review): this mmap + full copy into a temp file is redundant —
# `mmapped_file[:]` materializes the entire file in memory, so this is
# equivalent to loading `model_path` directly with load_word2vec_format.
# Kept as-is because `mmapped_file` and `temp_model_path` are closed /
# removed at the end of the script.
with open(model_path, 'r+b') as f:
    # File size, needed to create the memory map.
    size = os.path.getsize(model_path)
    # Create a read-only memory map over the model file.
    mmapped_file = mmap.mmap(f.fileno(), length=size, access=mmap.ACCESS_READ)
    # Copy the mapped contents into a temp file (deleted at script end).
    with tempfile.NamedTemporaryFile(delete=False) as temp_file:
        temp_file.write(mmapped_file[:])
        temp_model_path = temp_file.name

# Load the word vectors from the temp copy (text, not binary, format).
word_vectors = KeyedVectors.load_word2vec_format(temp_model_path, binary=False)

# Input files: the cleaned ad texts and the list of prohibited comparison words.
text_excel_file = "cleaned_data.xlsx"
comparison_excel_file = r"D:\实习\数据集.xlsx"

# Read both Excel files.
text_df = pd.read_excel(text_excel_file)
comparison_df = pd.read_excel(comparison_excel_file)

# First column of the text file: the texts to score.
texts_to_compare = text_df.iloc[:, 0].tolist()

# First column of the dataset file: the prohibited words to compare against.
comparison_words = comparison_df.iloc[:, 0].tolist()

def filter_words(words):
    """Filter a token list down to purely-Chinese words.

    Drops any token containing non-Chinese characters, plus the generic
    noise words "恭喜" / "获得" which would otherwise dominate the
    similarity matching.

    :param words: iterable of tokens (e.g. jieba segmentation output)
    :return: list of tokens consisting only of Chinese characters
    """
    noise_words = {"恭喜", "获得"}
    # fullmatch is stricter than re.match with '^...$' (which also accepts
    # a trailing newline) and needs no explicit anchors.
    chinese_only = re.compile(r'[\u4e00-\u9fa5]+')
    return [w for w in words if chinese_only.fullmatch(w) and w not in noise_words]

def calculate_highest_similarity(text, comparison_words):
    """Find the highest word-vector similarity between *text* and a word list.

    The text is segmented with jieba and filtered to Chinese-only tokens;
    every (text word, comparison word) pair is scored with the global
    `word_vectors` model. Scanning stops early as soon as a pair reaches
    `threshold_value`. Pairs with out-of-vocabulary words are skipped.

    :return: (best score, best text word or None, best comparison word or None)
    """
    best_score = 0
    best_text_word = None
    best_comparison_word = None
    # Segment with jieba, then drop non-Chinese tokens and noise words.
    candidates = filter_words(jieba.lcut(text))
    for candidate in candidates:
        for target in comparison_words:
            try:
                score = word_vectors.similarity(candidate, target)
            except KeyError:
                # One of the words is not in the model vocabulary.
                continue
            if score > best_score:
                best_score = score
                best_text_word = candidate
                best_comparison_word = target
            if best_score >= threshold_value:
                # Early exit: a match above the threshold is good enough.
                return best_score, best_text_word, best_comparison_word
    return best_score, best_text_word, best_comparison_word

# Score every text: record its highest similarity and the matching word pair.
# BUG FIX: the original computed `index = texts_to_compare.index(text)` on
# every iteration — an unused O(n) lookup (O(n^2) overall, and wrong for
# duplicate texts). Removed.
max_similarities = []
most_similar_words_from_text = []
most_similar_words_from_comparison = []
categories = []

for text in tqdm(texts_to_compare):
    similarity_score, word_from_text, word_from_comparison = calculate_highest_similarity(text, comparison_words)
    max_similarities.append(similarity_score)
    most_similar_words_from_text.append(word_from_text if word_from_text else "无")
    most_similar_words_from_comparison.append(word_from_comparison if word_from_comparison else "无")
    # Category 1 (violation) when the similarity exceeds the threshold, else 0.
    categories.append(1 if similarity_score > threshold_value else 0)

# Attach the results to the text DataFrame.
text_df['最高相似度'] = max_similarities
text_df['最高相似度文本词'] = most_similar_words_from_text
text_df['最高相似度比较词'] = most_similar_words_from_comparison
text_df['类别'] = categories


# Extract text words of category 1 and append them to the training-set Excel.
# BUG FIX: the original re-ran calculate_highest_similarity over the whole
# dataset a second time, doubling the expensive similarity pass; the scores
# and words were already computed in the loop above, so reuse them.
# Words at similarity >= 0.99 are excluded: they are (near-)exact matches of
# words already in the comparison set.
new_data_rows = set()
for similarity_score, word_from_text in zip(max_similarities, most_similar_words_from_text):
    if threshold_value < similarity_score < 0.99:
        new_data_rows.add(word_from_text)

# Append the newly discovered words to the existing training-set Excel file.
if new_data_rows:
    # sorted() gives deterministic row order (a set has arbitrary order).
    new_data_df = pd.DataFrame(sorted(new_data_rows), columns=['专有名词'])
    existing_training_data_file = r"D:\实习\数据集.xlsx"
    existing_training_df = pd.read_excel(existing_training_data_file)
    updated_training_df = pd.concat([existing_training_df, new_data_df], ignore_index=True)
    updated_training_df.to_excel(existing_training_data_file, index=False)
    print(f"已将新数据插入到 {existing_training_data_file}")

# Close the memory map opened during model loading.
mmapped_file.close()

# Save the scored results to a new Excel file.
output_excel_file = r"D:\实习\model2.xlsx"
text_df.to_excel(output_excel_file, index=False)

# Delete the temporary model copy created during model loading.
os.remove(temp_model_path)

# Print the final results.
print(text_df)

# Report total wall-clock runtime.
end_time = time.time()
elapsed_time = end_time - start_time
print(f"代码运行时间: {elapsed_time:.2f} 秒")