import pandas as pd
import numpy as np
import re
import jieba
import jieba.posseg as psg
from datetime import datetime

# Load the raw review dataset from Excel.
data = pd.read_excel('data.xlsx')


# Text cleaning: keep only the Chinese characters of each review.
def remove_non_chinese(text):
    """Strip every character outside the common CJK Unified Ideographs range.

    Non-string input (e.g. NaN from a missing Excel cell) is returned
    unchanged, so the later missing-value fill still sees it as missing.
    The original crashed with TypeError when re.sub received a float NaN,
    because this runs before fill_missing_with_random.
    """
    if not isinstance(text, str):
        return text
    # \u4e00-\u9fa5 covers the common CJK Unified Ideographs block.
    return re.sub(r'[^\u4e00-\u9fa5]', '', text)


data['评价'] = data['评价'].apply(remove_non_chinese)


# Normalize the review-time column to a single date format.
def normalize_date(date_str):
    """Return the date part of *date_str* formatted as 'YYYY-MM-DD'.

    Values pandas cannot parse are returned unchanged. Uses
    ``errors='coerce'`` plus a NaT check instead of the original bare
    ``except:``, which silently swallowed every exception type.
    """
    date = pd.to_datetime(date_str, errors='coerce')
    if pd.isna(date):
        # Unparseable (or already-missing) value: keep the original.
        return date_str
    return date.strftime('%Y-%m-%d')


# Normalize review timestamps to a uniform YYYY-MM-DD format.
data['评论时间'] = data['评论时间'].apply(normalize_date)

# Drop duplicate rows by user ID (keeps the first occurrence per user).
data.drop_duplicates(subset=['用户ID'], inplace=True)


# Replace each missing value with a random draw from that column's existing values.
def fill_missing_with_random(data):
    """Fill NaNs in every column with random choices drawn from the
    column's non-missing unique values.

    Columns that are entirely null are skipped — the original code
    crashed there, since ``np.random.choice`` raises ValueError on an
    empty array. Filling is done with one vectorized draw per column
    instead of a per-element ``apply``. Mutates *data* in place and
    returns it.
    """
    for column in data.columns:
        mask = data[column].isnull()
        if not mask.any():
            continue
        pool = data[column].dropna().unique()
        if len(pool) == 0:
            continue  # all-null column: nothing to sample from
        # One random value per missing cell, assigned in a single shot.
        data.loc[mask, column] = np.random.choice(pool, size=int(mask.sum()))
    return data


data = fill_missing_with_random(data)

# Save the cleaned dataset (row index omitted).
data.to_excel('cleaned_data.xlsx', index=False)

print("数据清洗完毕")

# --- Tokenization ---
text_list1 = []  # flat list of tokens over all reviews, stop words removed
text_list2 = []  # one list of (word, POS) pairs per review
# Stop-word list: tokens dropped from the flat token list.
del_list = ['景区', '景色', '人', '好', '的']
# Iterate the column values directly instead of range(len(...)) + .iloc lookups.
for review in data["评价"]:
    # Plain segmentation, filtered against the stop-word list.
    text_list1.extend(tok for tok in jieba.lcut(review) if tok not in del_list)
    # POS-tagged segmentation, kept per review.
    text_list2.append(psg.lcut(review))

# Export the flat token list, space separated (same format as before).
with open('text_list1.txt', 'w', encoding='utf-8') as f:
    f.write("".join(tok + " " for tok in text_list1))

# Export the POS-tagged segmentation, one review per line.
with open('text_list2.txt', 'w', encoding='utf-8') as f:
    for tagged in text_list2:
        f.write(f"{tagged}\n")

print("分词结果导出完成")
