import pymysql
import pandas as pd


# Database connection configuration for the scraped-books MySQL instance.
db_config = {
    'host': 'localhost',  # database host address
    'user': 'root',  # user name
    'password': '123456',  # password
    'database': 'pachong',  # database (schema) name
}

def method1():
    """Fetch every row of the ``books`` table as a pandas DataFrame.

    Returns:
        pandas.DataFrame with the full table contents, or ``None`` when
        the connection or query fails (the error is printed, not raised).
    """
    conn = None  # defined up-front so the finally block can test it safely
    try:
        conn = pymysql.connect(**db_config)
        sql_query = "SELECT * FROM books"
        # Build the DataFrame directly from the open connection.
        return pd.read_sql(sql_query, conn)

    except Exception as e:
        print(f"数据库操作失败: {e}")
    finally:
        # Close only if the connection was actually established.
        if conn is not None and conn.open:
            conn.close()

# Load the raw table and give the columns their Chinese business names.
# (The original bare `df.head()` expression was removed: outside a
# notebook its result is discarded, so it was a no-op.)
df = method1()
df.columns = ['id','标题','作者','出版社','价格','标签','URL','收藏数','推荐数','简介']

# Basic quality report: per-column missing values and duplicated rows.
missing_info = df.isnull().sum()
duplicated_values = df.duplicated().sum()
print("各列缺失值统计：\n", missing_info)
print("\n数据重复值统计：\n", duplicated_values)

# Report how many entries in each string-typed column are blank
# (empty or whitespace-only) after stripping.
for col in df.select_dtypes(include=['object']).columns:
    blank_mask = df[col].str.strip() == ''
    blank_total = df.loc[blank_mask, col].count()
    print(f"属性 '{col}' 中的空字符串数量: {blank_total}")

# Replace blank values in the key text columns with explicit placeholders.
for col, placeholder in (('作者', '未知'), ('简介', '暂无简介'), ('出版社', '未知')):
    df.loc[df[col].str.strip() == '', col] = placeholder


# Descriptive statistics for the numeric columns (NaN is skipped by agg).
numeric_cols = ['收藏数', '推荐数', '价格']
stats = df[numeric_cols].agg(['mean', 'max', 'min', 'count'])
print(stats)

# Split the comma-separated tag field, trim whitespace around each tag,
# then count how often every individual tag occurs.
trimmed_tags = (
    df['标签']
    .fillna('')
    .str.split(',')
    .apply(lambda tags: [t.strip() for t in tags])
)
tag_counts_df = trimmed_tags.explode().value_counts().reset_index()
tag_counts_df.columns = ['标签', '出现次数']
print(tag_counts_df)


# Per-column category breakdown for the categorical attributes.
Class_list = ['作者', '出版社']
for column_name in Class_list:
    class_counts = df[column_name].value_counts()
    print(f'============={column_name}的分类统计情况===============')
    print(class_counts)

import jieba
import jieba.analyse
import numpy as np
import pandas as pd
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# ==================== Constants ====================
PSYCHOLOGY_TERMS = [
    "心理学", "基础心理学", "应用心理学", "发展心理学", "社会心理学",
    "教育心理学", "临床心理学", "认知心理学", "人格心理学", "心理咨询",
    "心理治疗", "认知行为疗法", "精神分析疗法", "正念疗法", "情绪管理",
    "依恋理论", "创伤疗愈", "抑郁", "焦虑", "大五人格", "心理测评", "MBTI",
    "心理", "疗法", "咨询", "治疗", "情绪", "关系", "成长", "心境", "快感",
    "绝望", "自责", "自杀", "失眠", "疲劳", "食欲", "注意", "认知", "归因",
    "图式", "自动", "反刍", "无助", "无望", "疗法", "咨询", "治疗", "药物",
    "电疗", "光照", "激活", "正念", "共病", "复发", "缓解", "康复", "病耻", "支持", "危机"
]  # core psychology terms force-added to jieba so they are not split apart

STOPWORDS_PATH = "../../r/stopwords-master/cn_stopwords.txt"  # path to the Chinese stopword list file
TOP_N_TAGS = 3  # maximum number of tags recommended per book
SIMILARITY_THRESHOLD = 0.25  # minimum cosine similarity for a tag to qualify

# Initialize jieba: register the domain vocabulary so multi-character
# psychology terms are kept whole during segmentation.
for term in PSYCHOLOGY_TERMS:
    jieba.add_word(term)


# ==================== Core functions ====================
def load_enhanced_stopwords(file_path):
    """Load a stopword list, one word per line.

    Args:
        file_path: path to a UTF-8 text file with one stopword per line.

    Returns:
        set[str]: the stripped, non-empty stopwords.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        stopwords = {line.strip() for line in f}
    # Blank lines (e.g. a trailing newline) would otherwise leave an
    # empty string in the set — drop it.
    stopwords.discard('')
    return stopwords


def preprocess_text(text):
    """Normalise raw text before vectorisation (NaN-safe).

    Keeps alphanumeric characters plus a small set of common Chinese
    punctuation marks; every other character is dropped, and the result
    is stripped of surrounding whitespace.
    """
    if pd.isna(text):
        return ""
    kept = "".join(ch for ch in text if ch.isalnum() or ch in "：，。、《》-")
    return kept.strip()


def build_tag_knowledge(df):
    """Build the tag knowledge base: one TF-IDF vector per known tag.

    Returns:
        (tag_vectors, vectorizer): ``tag_vectors`` maps each existing tag
        to the TF-IDF vector of all of its books' combined text, and
        ``vectorizer`` is the fitted global TfidfVectorizer.
    """
    # Keep only rows that already carry a tag, merge title + blurb,
    # and normalise the merged text.
    classified = df[df['标签'] != '未分类'].copy()
    classified['内容'] = (classified['标题'] + " " + classified['简介']).apply(preprocess_text)

    # Fit one shared vectorizer over every classified book's content.
    vectorizer = TfidfVectorizer(tokenizer=jieba.lcut_for_search)
    vectorizer.fit(classified['内容'].tolist())

    # One vector per tag: transform the concatenation of that tag's books.
    tag_vectors = {}
    for tag, group in classified.groupby('标签'):
        merged = " ".join(group['内容'])
        if merged:
            tag_vectors[tag] = vectorizer.transform([merged])

    return tag_vectors, vectorizer


def recommend_tags(content, tag_vectors, vectorizer):
    """Recommend up to TOP_N_TAGS tags for a book's text.

    Tags whose cosine similarity to the content reaches
    SIMILARITY_THRESHOLD are preferred; when fewer than TOP_N_TAGS
    qualify, TextRank keywords fill the remaining slots. Any failure is
    printed and yields an empty list (deliberate best-effort behaviour).
    """
    try:
        processed = preprocess_text(content)
        if not processed:
            return []

        book_vec = vectorizer.transform([processed])

        # Score every known tag against the book; keep those at or
        # above the similarity threshold.
        scores = {}
        for tag, tag_vec in tag_vectors.items():
            similarity = cosine_similarity(book_vec, tag_vec)[0][0]
            if similarity >= SIMILARITY_THRESHOLD:
                scores[tag] = similarity

        ranked = sorted(scores, key=scores.get, reverse=True)
        if len(ranked) >= TOP_N_TAGS:
            return ranked[:TOP_N_TAGS]

        # Not enough similar tags: pad with TextRank keywords.
        keywords = jieba.analyse.textrank(
            processed, topK=TOP_N_TAGS * 2, allowPOS=('n', 'vn', 'an')
        )
        return (ranked + keywords)[:TOP_N_TAGS]

    except Exception as e:
        print(f"标签推荐异常: {str(e)}")
        return []


# ==================== Main pipeline ====================
if __name__ == "__main__":
    # Load the stopwords and hand the same file to jieba's keyword extractor.
    stopwords = load_enhanced_stopwords(STOPWORDS_PATH)
    jieba.analyse.set_stop_words(STOPWORDS_PATH)  # reuse the stopword file directly

    # Build the per-tag TF-IDF knowledge base from already-tagged books.
    print("构建标签知识库...")
    tag_vectors, vectorizer = build_tag_knowledge(df)
    print(f"成功构建包含 {len(tag_vectors)} 个标签的知识库")

    # Recommend tags only for books still marked 未分类 ("uncategorized");
    # already-tagged rows keep their original tag unchanged.
    uncat_mask = df['标签'] == '未分类'
    df['推荐标签'] = df.apply(
        lambda row: ", ".join(recommend_tags(
            f"{row['标题']} {row['简介']}", tag_vectors, vectorizer
        )) if row['标签'] == '未分类' else row['标签'],
        axis=1
    )

    # Summary: distribution of the (recommended or original) tags.
    tag_distribution = df['推荐标签'].str.split(',').explode().value_counts()
    print("\n标签分布统计:")
    print(tag_distribution.head(10).to_string())

    # NOTE(review): the "成功推荐" figure counts rows over the WHOLE frame,
    # not just the previously uncategorized ones — confirm that is intended.
    print(f"\n处理完成！总未分类数: {uncat_mask.sum()}, 成功推荐: {(df['推荐标签'] != '未分类').sum()}")



# Replace the 未分类 placeholder with the recommended tags produced above.
# NOTE(review): 推荐标签 is only created inside the __main__ guard, so this
# line fails if the file is imported as a module — confirm intended usage.
df['标签'] = df['标签'].mask(df['标签'] == '未分类', df['推荐标签'])

# The helper column is no longer needed once merged into 标签.
# (The original bare `df1` expression after this line was removed: it
# only displays output in a notebook and is a no-op in a script.)
df1 = df.drop('推荐标签', axis=1)

# Final fill-ins before export: -1 flags an unknown price, and any
# remaining missing tag falls back to the 未分类 placeholder.
df1['价格'] = df1['价格'].fillna(-1)
df1['标签'] = df1['标签'].fillna("未分类")

# utf-8-sig adds a BOM so Excel opens the Chinese headers correctly.
df1.to_csv('data_clear.csv', index=False, encoding='utf-8-sig')



