import streamlit as st
import gensim.downloader as api
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import matplotlib

# Optional Chinese font setup for matplotlib (macOS path). Intentionally
# disabled — NOTE(review): nothing below may reference my_font while this
# stays commented out.
# font_path = "/System/Library/Fonts/PingFang.ttc"
# my_font = fm.FontProperties(fname=font_path)
#
# # Use the Chinese font and keep the minus sign rendering correctly.
# matplotlib.rcParams['font.family'] = my_font.get_name()
# matplotlib.rcParams['axes.unicode_minus'] = False

# Streamlit page configuration — must be the first st.* call in the script.
st.set_page_config(page_title="Word2Vec 词向量可视化", layout="wide")

# Page title and author byline.
st.title("🔹 Word2Vec 词向量可视化")
st.caption("作者：何双新 ｜ 功能：Word2Vec 词向量可视化")

@st.cache_resource(show_spinner=True)
def load_model():
    """Download (first run) and load the pretrained Google News word2vec
    KeyedVectors; cached by Streamlit so reruns reuse the same object."""
    kv = api.load('word2vec-google-news-300')
    return kv

# Load the cached model once per session.
model = load_model()

# Body title (shown in addition to the header title above).
st.title("📌 Word2Vec 词向量可视化工具")

# Four feature tabs: vector lookup, similarity, analogy, 2-D visualization.
tab1, tab2, tab3, tab4 = st.tabs(["词向量查询", "词语相似度", "类比推理", "向量可视化"])

# --- 1. Word-vector lookup ---
with tab1:
    st.header("🔍 查看词向量")
    word = st.text_input("输入一个英文单词：", "computer")
    if st.button("获取词向量"):
        # Guard clause: report out-of-vocabulary words instead of raising KeyError.
        if word not in model:
            st.error("词不在词汇表中，请尝试其他单词")
        else:
            vec = model[word]
            st.write(f"词向量维度: {vec.shape}")
            st.write("前10个维度的值：", vec[:10])

# --- 2. Word similarity ---
with tab2:
    st.header("🔗 计算词语相似度")
    # Calling text_input on the column object is equivalent to `with col:`.
    left, right = st.columns(2)
    word1 = left.text_input("单词1", "computer")
    word2 = right.text_input("单词2", "laptop")

    if st.button("计算相似度"):
        # Both words must be in the vocabulary before asking for similarity.
        if word1 not in model or word2 not in model:
            st.error("一个或两个词不在词汇表中")
        else:
            sim = model.similarity(word1, word2)
            st.success(f"'{word1}' 和 '{word2}' 的余弦相似度为：{sim:.4f}")

# --- 3. Analogy reasoning (word1 - word2 + word3 ≈ ?) ---
with tab3:
    st.header("🧠 类比推理（word1 - word2 + word3 ≈ ?）")
    cols = st.columns(3)
    w1 = cols[0].text_input("词1 (如 king)", "king")
    w2 = cols[1].text_input("词2 (如 man)", "man")
    w3 = cols[2].text_input("词3 (如 woman)", "woman")

    if st.button("进行类比推理"):
        # most_similar raises KeyError for out-of-vocabulary inputs; results
        # are only rendered on the success path (else branch).
        try:
            result = model.most_similar(positive=[w3, w1], negative=[w2], topn=5)
        except KeyError as e:
            st.error(f"词汇错误: {e}")
        else:
            st.write(f"'{w1}' 之于 '{w2}'，相当于 '{w3}' 之于：")
            for word, score in result:
                st.write(f"- {word}: {score:.4f}")

# --- 4. Vector visualization (PCA to 2-D) ---
with tab4:
    st.header("📈 词向量可视化（PCA降维）")
    words_input = st.text_area(
        "输入一组英文单词，用逗号分隔（如：king,queen,man,woman,computer）",
        "king, queen, man, woman, computer, banana, apple, orange, prince, princess"
    )
    # Keep only in-vocabulary words; remember the rest to tell the user later.
    raw_words = [w.strip() for w in words_input.split(",")]
    words = [w for w in raw_words if w in model]
    skipped = [w for w in raw_words if w not in model]

    if st.button("生成可视化图像"):
        if len(words) < 2:
            st.warning("请至少输入两个词，并确保它们在词汇表中")
        else:
            # Hoisted from mid-block: used by the distance analysis below.
            from scipy.spatial.distance import euclidean

            # Project the word vectors down to 2-D for plotting.
            vectors = [model[w] for w in words]
            pca = PCA(n_components=2)
            reduced = pca.fit_transform(vectors)

            fig, ax = plt.subplots(figsize=(12, 8))
            ax.set_facecolor("#f9f9f9")  # light background
            ax.grid(True, linestyle='--', alpha=0.5)  # dashed grid lines

            # Scatter of the projected points.
            ax.scatter(reduced[:, 0], reduced[:, 1],
                       color="#1f77b4", s=100, alpha=0.7, edgecolor='k', linewidth=0.5)

            # Shrink label font as the word count grows (floor of 8 pt).
            font_size = max(8, 14 - len(words) // 5)

            # Label each point. BUGFIX: the original passed fontproperties=my_font,
            # but my_font exists only in the commented-out font setup at the top
            # of the file, so this line raised NameError at runtime. The labels
            # are English words, so the default font suffices.
            for i, word in enumerate(words):
                ax.annotate(word, xy=(reduced[i, 0], reduced[i, 1]),
                            fontsize=font_size,
                            xytext=(5, 2), textcoords='offset points',
                            bbox=dict(boxstyle='round,pad=0.3', edgecolor='gray', facecolor='white', alpha=0.6),
                            arrowprops=dict(arrowstyle='->', color='gray', lw=0.5))

            ax.set_title("📌 Word2Vec 词向量 2D 可视化 (PCA)", fontsize=18, pad=15)
            st.pyplot(fig)

            if skipped:
                st.info(f"以下词不在模型词汇表中，已跳过：{', '.join(skipped)}")

            # Auto-generated textual analysis of the 2-D layout.
            st.subheader("🧠 分析说明")
            explanation = []

            # Find the closest pairs in the projected (2-D) space.
            pairs = []
            for i in range(len(words)):
                for j in range(i + 1, len(words)):
                    dist = euclidean(reduced[i], reduced[j])
                    pairs.append(((words[i], words[j]), dist))
            pairs.sort(key=lambda x: x[1])

            top_similar = pairs[:3]  # three closest pairs
            for (w1, w2), dist in top_similar:
                explanation.append(f"🔹 **{w1}** 和 **{w2}** 在图中非常接近，表明它们在语义上可能较为相关（距离约为 {dist:.2f}）。")

            # Outlier detection: the word farthest from the centroid of all
            # points. BUGFIX: the original message claimed "平均距离最大"
            # (largest average distance to other words), but the code measures
            # distance to the centroid — the message now states what is computed.
            center = reduced.mean(axis=0)
            dists_to_center = [(words[i], euclidean(reduced[i], center)) for i in range(len(words))]
            dists_to_center.sort(key=lambda x: x[1], reverse=True)
            outlier_word, max_dist = dists_to_center[0]
            explanation.append(f"🔸 **{outlier_word}** 距离所有词的中心最远（约为 {max_dist:.2f}），可能表示它语义上偏离较远。")

            # Render the generated explanation lines.
            for line in explanation:
                st.markdown(line)

st.caption("安徽智加数字科技有限公司 · 技术学习组出品 🚀")
