# -*- coding: utf-8 -*-
"""
工单编号: 大数据-用户画像-19-阿里电商搜索
电商搜索算法实现
目标: 构建商品搜索排序模型，优化淘宝搜索GMV转化
"""

import pandas as pd
import numpy as np
import jieba
import re
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from rank_bm25 import BM25Okapi
from gensim.models import Word2Vec
from sklearn.model_selection import train_test_split
import xgboost as xgb
import lightgbm as lgb
from transformers import BertTokenizer, BertModel
import torch
import torch.nn as nn
from sentence_transformers import SentenceTransformer
from sklearn.metrics import ndcg_score
import matplotlib.pyplot as plt

# -------------------- 1. 数据加载与预处理 --------------------
print("Step 1: 数据加载与预处理...")
corpus = pd.read_csv('corpus.tsv', sep='\t', names=['doc_id', 'title'], header=None)
train_query = pd.read_csv('train.query.txt', sep=' ', names=['query_id', 'query'], header=None)
qrels_train = pd.read_csv('qrels.train.tsv', sep=' ', names=['query_id', 'doc_id'], header=None)
dev_query = pd.read_csv('dev.query.txt', sep=' ', names=['query_id', 'query'], header=None)

# Join relevance judgments with the query text and the document titles.
train_data = pd.merge(qrels_train, train_query, on='query_id')
train_data = pd.merge(train_data, corpus, on='doc_id')

# Negative sampling: up to 3 random non-relevant docs per query.
np.random.seed(42)
# PERF FIX: hoist loop-invariant work out of the per-query loop. The original
# rebuilt set(corpus['doc_id']) and linearly re-scanned train_data / corpus on
# every iteration, which is O(queries * corpus) overall.
all_docs = set(corpus['doc_id'])
title_by_doc = dict(zip(corpus['doc_id'], corpus['title']))
query_by_qid = dict(zip(train_query['query_id'], train_query['query']))
pos_docs_by_qid = train_data.groupby('query_id')['doc_id'].agg(set)

neg_samples = []
for qid in train_data['query_id'].unique():
    neg_docs = list(all_docs - pos_docs_by_qid[qid])
    sampled_neg = np.random.choice(neg_docs, size=min(3, len(neg_docs)), replace=False)
    query = query_by_qid[qid]
    for doc_id in sampled_neg:
        neg_samples.append({'query_id': qid, 'query': query, 'doc_id': doc_id,
                            'title': title_by_doc[doc_id], 'label': 0})

neg_df = pd.DataFrame(neg_samples)
train_data['label'] = 1
train_data = pd.concat([train_data, neg_df]).reset_index(drop=True)

# -------------------- 2. Text preprocessing --------------------
print("Step 2: 文本预处理...")
# FIX: load the stopword list with a context manager so the file handle is
# closed promptly — the original `open()` inside the comprehension leaked it.
with open('stopwords.txt', encoding='utf-8') as f:
    stopwords = {line.strip() for line in f}


def clean_text(text):
    """Strip punctuation/symbols and digits from *text* and trim whitespace."""
    no_punct = re.sub(r'[^\w\s]', '', text)   # keep only word chars and spaces
    no_digits = re.sub(r'\d+', '', no_punct)  # drop digit runs
    return no_digits.strip()


def chinese_seg(text):
    """Clean *text*, segment it with jieba, and keep only multi-char non-stopword tokens, space-joined."""
    tokens = jieba.cut(clean_text(text))
    kept = [tok for tok in tokens if tok not in stopwords and len(tok) > 1]
    return ' '.join(kept)


# Apply preprocessing: segment every query and title once, up front.
for _frame, _src in ((train_data, 'query'), (train_data, 'title'), (corpus, 'title')):
    _frame[_src + '_seg'] = _frame[_src].map(chinese_seg)

# -------------------- 3. Feature engineering --------------------
print("Step 3: 特征提取...")

# 3.1 TF-IDF similarity between query and title.
tfidf = TfidfVectorizer(max_features=5000)
tfidf.fit(train_data['query_seg'].tolist() + train_data['title_seg'].tolist())

# PERF FIX: the original called tfidf.transform() twice per row inside apply().
# Transform all rows in two batch calls instead. TfidfVectorizer L2-normalizes
# its rows by default (norm='l2'), so the row-wise dot product of the two
# matrices equals the cosine similarity — and is 0.0 for all-zero rows, which
# matches cosine_similarity's behavior on zero vectors.
query_mat = tfidf.transform(train_data['query_seg'])
title_mat = tfidf.transform(train_data['title_seg'])
train_data['tfidf_sim'] = np.asarray(query_mat.multiply(title_mat).sum(axis=1)).ravel()

# 3.2 BM25 feature
corpus_seg = [doc.split() for doc in corpus['title_seg']]
bm25 = BM25Okapi(corpus_seg)
# Map doc_id -> position in the BM25 index (also reused by the Flask service).
docid_to_idx = {doc_id: i for i, doc_id in enumerate(corpus['doc_id'])}

# PERF FIX: get_scores() scores EVERY corpus document per row only to read a
# single entry; get_batch_scores() scores just the requested document.
train_data['bm25_score'] = train_data.apply(
    lambda x: bm25.get_batch_scores(x['query_seg'].split(),
                                    [docid_to_idx[x['doc_id']]])[0],
    axis=1
)

# 3.3 Word2Vec feature
sentences = [doc.split() for doc in train_data['title_seg']]
w2v_model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)


def w2v_similarity(query, title):
    """Cosine similarity of the mean word vectors of two segmented strings.

    Returns 0.0 when either side has no in-vocabulary token. (BUG FIX: the
    original took np.mean of an empty list, producing NaN, which still passed
    the `size > 0` check — so out-of-vocabulary pairs yielded NaN features.)
    """
    q_vecs = [w2v_model.wv[w] for w in query.split() if w in w2v_model.wv]
    t_vecs = [w2v_model.wv[w] for w in title.split() if w in w2v_model.wv]
    if not q_vecs or not t_vecs:
        return 0.0
    return cosine_similarity([np.mean(q_vecs, axis=0)],
                             [np.mean(t_vecs, axis=0)])[0][0]


train_data['w2v_sim'] = train_data.apply(
    lambda x: w2v_similarity(x['query_seg'], x['title_seg']),
    axis=1
)


# 3.4 Statistical features
def get_overlap_features(q_tokens, t_tokens):
    """Token-overlap statistics for a (query tokens, title tokens) pair.

    Returns a dict with the Jaccard similarity, the fraction of distinct
    query tokens present in the title, and the query/title length ratio
    (each falls back to 0 when its denominator would be zero).
    """
    q_set = set(q_tokens)
    t_set = set(t_tokens)
    shared = len(q_set & t_set)
    union = len(q_set) + len(t_set) - shared
    return {
        'jaccard': shared / union if (len(q_set) + len(t_set)) > 0 else 0,
        'contain_ratio': shared / len(q_set) if q_set else 0,
        'length_ratio': len(q_tokens) / len(t_tokens) if t_tokens else 0,
    }


# Compute the overlap statistics row-by-row and attach them as new columns.
stats_records = train_data.apply(
    lambda row: get_overlap_features(row['query_seg'].split(),
                                     row['title_seg'].split()),
    axis=1,
).tolist()
train_data = pd.concat([train_data, pd.DataFrame(stats_records)], axis=1)

# 3.5 BERT semantic feature (simplified; fine-tuning is recommended in practice)
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
bert_model = BertModel.from_pretrained('bert-base-chinese')


def get_bert_embedding(text):
    """Return the [CLS]-token embedding of *text* as a (1, hidden_size) array."""
    encoded = tokenizer(text, return_tensors="pt", truncation=True, max_length=64)
    with torch.no_grad():  # inference only — no gradients needed
        output = bert_model(**encoded)
    cls_vector = output.last_hidden_state[:, 0, :]
    return cls_vector.numpy()


# BERT inference is slow, so only a 1000-row sample is scored for this demo.
sample_data = train_data.sample(1000, random_state=42)
# NOTE(review): sample_data is a copy; assigning a column here may emit a
# pandas SettingWithCopyWarning but does not modify train_data.
sample_data['bert_sim'] = sample_data.apply(
    lambda x: cosine_similarity(
        get_bert_embedding(x['query']),
        get_bert_embedding(x['title'])
    )[0][0],
    axis=1
)

# Merge the sampled scores back; unsampled rows get bert_sim = 0.
# NOTE(review): assumes (query_id, doc_id) pairs are unique in train_data —
# duplicate pairs would fan out rows in this merge; verify against qrels.
train_data = train_data.merge(sample_data[['query_id', 'doc_id', 'bert_sim']],
                              on=['query_id', 'doc_id'], how='left')
train_data['bert_sim'] = train_data['bert_sim'].fillna(0)

# -------------------- 4. Model training --------------------
print("Step 4: 模型训练...")

# Assemble the feature matrix and binary relevance labels.
# NOTE: `features` is extended in-place later (category one-hots, step 6.1).
features = ['tfidf_sim', 'bm25_score', 'w2v_sim', 'jaccard', 'contain_ratio', 'length_ratio', 'bert_sim']
X = train_data[features]
y = train_data['label']

# Hold out 20% for validation.
# NOTE(review): the split is random over rows, so documents for the same
# query can land on both sides — validation scores may be optimistic.
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

# 4.1 XGBoost model
xgb_model = xgb.XGBClassifier(
    objective='binary:logistic',
    n_estimators=500,
    max_depth=6,
    learning_rate=0.05,
    subsample=0.8,           # row subsampling per tree
    colsample_bytree=0.8,    # feature subsampling per tree
    random_state=42
)
# NOTE(review): passing early_stopping_rounds to fit() works only on
# xgboost < 2.0; from 2.0 it must be given to the estimator constructor.
xgb_model.fit(X_train, y_train,
              eval_set=[(X_val, y_val)],
              early_stopping_rounds=20,
              verbose=10)

# 4.2 LightGBM model
lgb_model = lgb.LGBMClassifier(
    num_leaves=31,
    max_depth=6,
    learning_rate=0.05,
    n_estimators=500,
    subsample=0.8,
    colsample_bytree=0.8,
    random_state=42
)
# NOTE(review): LightGBM 4.x removed early_stopping_rounds/verbose from
# fit(); newer versions need callbacks=[lgb.early_stopping(20), lgb.log_evaluation(10)].
lgb_model.fit(X_train, y_train,
              eval_set=[(X_val, y_val)],
              early_stopping_rounds=20,
              verbose=10)

# 4.3 Neural matching model (Sentence-BERT)
sbert_model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')


# Training a neural ranker is expensive; only the scoring path is shown here.
def sbert_predict(query, title):
    """Cosine similarity between the Sentence-BERT embeddings of two texts."""
    q_emb = sbert_model.encode(query)
    t_emb = sbert_model.encode(title)
    return cosine_similarity([q_emb], [t_emb])[0][0]


# -------------------- 5. Model evaluation --------------------
print("Step 5: 模型评估...")


def evaluate_model(model, X, y, model_name):
    """Print and return (MRR@10, NDCG@10) for *model* on feature frame *X*.

    X must be a row-slice of the global ``train_data`` — its index is used
    to recover each row's query_id for per-query ranking.
    """
    y_pred = model.predict_proba(X)[:, 1]

    # BUG FIX: the original used X.reset_index()['query_id'], but X contains
    # only feature columns, so reset_index() never yields a 'query_id' column
    # (KeyError). Recover query ids through train_data via X's index. Also
    # pass plain arrays below: building a DataFrame from Series with
    # different indexes silently misaligns rows and injects NaNs.
    query_ids = train_data.loc[X.index, 'query_id'].to_numpy()
    results = pd.DataFrame({
        'query_id': query_ids,
        'score': y_pred,
        'label': np.asarray(y),
    })

    # MRR@10: reciprocal rank of the first relevant doc within the top 10.
    mrr_scores = []
    for qid in results['query_id'].unique():
        q_results = results[results['query_id'] == qid]
        q_results = q_results.sort_values('score', ascending=False).reset_index(drop=True)
        for i, row in q_results.iterrows():
            if row['label'] == 1 and i < 10:
                mrr_scores.append(1.0 / (i + 1))
                break
        else:
            # No relevant doc in the top 10 for this query.
            mrr_scores.append(0)

    mrr = np.mean(mrr_scores)
    print(f"{model_name} MRR@10: {mrr:.4f}")

    # NDCG@10, treating the whole slice as one ranking (coarse but comparable).
    ndcg = ndcg_score([np.asarray(y)], [y_pred], k=10)
    print(f"{model_name} NDCG@10: {ndcg:.4f}")

    return mrr, ndcg


# Evaluate both base models on the held-out validation slice.
print("\n=== 验证集评估 ===")
xgb_mrr, xgb_ndcg = evaluate_model(xgb_model, X_val, y_val, "XGBoost")
lgb_mrr, lgb_ndcg = evaluate_model(lgb_model, X_val, y_val, "LightGBM")

# -------------------- 6. Model optimization --------------------
print("\nStep 6: 模型优化...")

# 6.1 Optimization round 1 — add product-category features
print("优化轮次1: 添加商品类目特征")


# Derive a coarse category from keywords found in the product title.
def extract_category(title):
    """Map a product title to a coarse category label via keyword matching."""
    if '手机壳' in title:
        return 'phone_case'
    if '衣服' in title or '外套' in title:
        return 'clothing'
    if '杯子' in title:
        return 'cup'
    return 'others'


train_data['category'] = train_data['title'].apply(extract_category)
train_data = pd.get_dummies(train_data, columns=['category'])

# Retrain with the new one-hot columns.
# ROBUSTNESS FIX: get_dummies only creates columns for categories that occur
# in the data, so indexing a hard-coded column list could KeyError. Create
# any missing dummy column as all-zeros first.
category_cols = ['category_clothing', 'category_cup', 'category_others', 'category_phone_case']
for col in category_cols:
    if col not in train_data.columns:
        train_data[col] = 0
features += category_cols
X = train_data[features]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)

xgb_model.fit(X_train, y_train, eval_set=[(X_val, y_val)], early_stopping_rounds=20)
print("优化后XGBoost评估:")
xgb_mrr_v1, xgb_ndcg_v1 = evaluate_model(xgb_model, X_val, y_val, "XGBoost(v1)")

# 6.2 Optimization round 2 — model ensembling
print("\n优化轮次2: 模型集成")


class EnsembleModel:
    """Average the positive-class probabilities of two binary classifiers."""

    def __init__(self, model1, model2):
        self.model1 = model1
        self.model2 = model2

    def predict_proba(self, X):
        """Return an (n, 2) array of [P(neg), P(pos)] averaged over both models."""
        pos = (self.model1.predict_proba(X)[:, 1]
               + self.model2.predict_proba(X)[:, 1]) / 2
        return np.column_stack([1 - pos, pos])


# Evaluate the averaged ensemble on the same validation slice.
ensemble = EnsembleModel(xgb_model, lgb_model)
print("集成模型评估:")
ens_mrr, ens_ndcg = evaluate_model(ensemble, X_val, y_val, "Ensemble")

# 6.3 Optimization round 3 — fine-tuning Sentence-BERT
print("\n优化轮次3: 微调Sentence-BERT")


# NOTE: real fine-tuning needs a training pipeline; this only builds samples.
def prepare_sbert_data(train_data, sample_size=10000):
    """Build a balanced list of (query, title, label) triples for fine-tuning.

    Randomly draws up to sample_size // 2 positives and as many negatives.
    """
    positives = train_data[train_data['label'] == 1]
    negatives = train_data[train_data['label'] == 0]
    half = sample_size // 2
    chosen = pd.concat([
        positives.sample(min(half, len(positives))),
        negatives.sample(min(half, len(negatives))),
    ])
    return list(zip(chosen['query'], chosen['title'], chosen['label']))


# Example fine-tuning code (intentionally not executed):
# train_samples = prepare_sbert_data(train_data)
# train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=16)
# sbert_model.fit(train_dataloader, epochs=1, warmup_steps=100)

# -------------------- 7. Model deployment --------------------
print("\nStep 7: 模型部署...")
from flask import Flask, request, jsonify
import joblib

# Persist the fitted artifacts so the serving process can reload them.
joblib.dump(ensemble, 'search_ensemble_model.pkl')
joblib.dump(tfidf, 'tfidf_vectorizer.pkl')
joblib.dump(bm25, 'bm25_model.pkl')
w2v_model.save('word2vec.model')

# Create the Flask app.
app = Flask(__name__)

# Reload the artifacts just saved above (round-trips the serialization a
# separate serving process would perform).
loaded_ensemble = joblib.load('search_ensemble_model.pkl')
loaded_tfidf = joblib.load('tfidf_vectorizer.pkl')
loaded_bm25 = joblib.load('bm25_model.pkl')
loaded_w2v = Word2Vec.load('word2vec.model')
# NOTE(review): re-loads the same SBERT checkpoint already bound to
# sbert_model in step 4.3 — redundant but harmless.
sbert_model = SentenceTransformer('paraphrase-multilingual-MiniLM-L12-v2')


@app.route('/predict', methods=['POST'])
def predict():
    """Score the relevance of a (query, title) pair posted as JSON.

    Expects a JSON body with 'query', 'title' and 'doc_id' (doc_id must be
    indexed in the corpus — it drives the BM25 lookup). Returns the ensemble
    relevance score plus the individual feature values. Responds 400 on
    missing fields or an unknown doc_id instead of crashing with a 500.
    """
    data = request.json
    # ROBUSTNESS FIX: validate input up front — the original raised KeyError
    # (HTTP 500) on any missing field or unindexed doc_id.
    if not data or any(k not in data for k in ('query', 'title', 'doc_id')):
        return jsonify({'error': "missing required fields: 'query', 'title', 'doc_id'"}), 400
    if data['doc_id'] not in docid_to_idx:
        return jsonify({'error': 'unknown doc_id'}), 400

    query = data['query']
    title = data['title']

    # Preprocess exactly as at training time.
    query_seg = chinese_seg(query)
    title_seg = chinese_seg(title)

    # Feature extraction (keys match the training-time feature order).
    features = {
        'tfidf_sim': cosine_similarity(loaded_tfidf.transform([query_seg]),
                                       loaded_tfidf.transform([title_seg]))[0][0],
        'bm25_score': loaded_bm25.get_scores(query_seg.split())[docid_to_idx[data['doc_id']]],
        'w2v_sim': w2v_similarity(query_seg, title_seg),
        **get_overlap_features(query_seg.split(), title_seg.split()),
        # NOTE(review): training computed bert_sim from BERT [CLS] vectors,
        # but serving uses Sentence-BERT — a train/serve skew to confirm.
        'bert_sim': cosine_similarity(sbert_model.encode([query]),
                                      sbert_model.encode([title]))[0][0]
    }

    # Category one-hot features (same keyword rules as extract_category).
    features.update({
        'category_clothing': 1 if '衣服' in title or '外套' in title else 0,
        'category_cup': 1 if '杯子' in title else 0,
        'category_others': 1 if not any(kw in title for kw in ['衣服', '外套', '杯子', '手机壳']) else 0,
        'category_phone_case': 1 if '手机壳' in title else 0
    })

    # Single-row frame in the same column order the model was trained with.
    features_df = pd.DataFrame([features])

    # Positive-class probability from the ensemble.
    score = loaded_ensemble.predict_proba(features_df)[0, 1]

    return jsonify({
        'query': query,
        'title': title,
        'relevance_score': float(score),
        'features': features
    })

# NOTE(review): debug=True must not ship to production — the Werkzeug
# debugger allows remote code execution — and app.run() blocks, so the
# reporting code below only runs after the server is stopped.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)

# -------------------- 8. Result visualization --------------------
print("\nStep 8: 生成优化报告...")
# Grouped bar chart: MRR/NDCG across the optimization rounds.
plt.figure(figsize=(10, 5))
models = ['XGBoost', 'XGBoost(v1)', 'Ensemble']
mrrs = [xgb_mrr, xgb_mrr_v1, ens_mrr]
ndcgs = [xgb_ndcg, xgb_ndcg_v1, ens_ndcg]

positions = list(range(len(models)))
plt.bar(positions, mrrs, width=0.4, label='MRR@10')
plt.bar([p + 0.4 for p in positions], ndcgs, width=0.4, label='NDCG@10')
plt.xticks([p + 0.2 for p in positions], models)
plt.title('Model Optimization Progress')
plt.legend()
plt.savefig('optimization_progress.png')
plt.close()

def _pct_gain(new, base):
    """Relative improvement over *base* in percent; 0.0 when base is zero."""
    return ((new - base) / base) * 100 if base else 0.0


# ROBUSTNESS FIX: compute the improvement percentages via _pct_gain so a
# zero baseline MRR/NDCG cannot raise ZeroDivisionError while reporting.
print("""
=== 优化报告 ===
1. 基础模型:
   - XGBoost: MRR@10={:.4f}, NDCG@10={:.4f}

2. 第一轮优化(添加类目特征):
   - XGBoost: MRR@10={:.4f} (+{:.2f}%), NDCG@10={:.4f} (+{:.2f}%)

3. 第二轮优化(模型集成):
   - Ensemble: MRR@10={:.4f} (+{:.2f}%), NDCG@10={:.4f} (+{:.2f}%)

优化可视化结果已保存为 optimization_progress.png
""".format(
    xgb_mrr, xgb_ndcg,
    xgb_mrr_v1, _pct_gain(xgb_mrr_v1, xgb_mrr), xgb_ndcg_v1, _pct_gain(xgb_ndcg_v1, xgb_ndcg),
    ens_mrr, _pct_gain(ens_mrr, xgb_mrr), ens_ndcg, _pct_gain(ens_ndcg, xgb_ndcg)
))

# NOTE(review): when run as a script, app.run() above blocks until shutdown,
# so this message only prints after the server stops.
print("部署服务已启动，访问 http://localhost:5000/predict 进行测试")