from collections import defaultdict

import streamlit as st
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import networkx as nx
import pickle
import re
import os
from apriori import load_apriori, getAsso, apriori
from zhipuai import ZhipuAI

# NOTE(review): the API key used to be hard-coded here — a credential committed
# to source control is a security risk. Prefer the ZHIPUAI_API_KEY environment
# variable; the original literal is kept only as a fallback so existing
# deployments keep working. Rotate the leaked key and drop the fallback.
API_KEY = os.environ.get("ZHIPUAI_API_KEY", "ee1ea463a07a423e934a2b8bc9fc7168.Jz8LPP93GLz9YD0f")
client = ZhipuAI(api_key=API_KEY)

# --- 数据加载 ---
@st.cache_data
def load_data():
    """Load the recruitment CSV, trying several encodings, and normalize headers.

    Returns:
        The DataFrame with English column names, or None if the file could
        not be read with any candidate encoding (an st.error is shown).
    """
    file_path = "招聘数据集(含技能列表）.csv"  # make sure this path is correct
    df = None
    for enc in ('utf-8', 'gbk', 'gb18030'):
        try:
            df = pd.read_csv(file_path, encoding=enc)
        except Exception as e:
            print(f"尝试以{enc}编码读取文件时出错: {e}")
        else:
            break
    if df is None:
        st.error("无法读取数据文件，请检查文件路径和编码")
        return None

    # Map the raw dataset headers onto the names the rest of the app expects.
    return df.rename(columns={
        'positionName': 'position',
        'workYear': 'experience',
        'education': 'education',
        'companyFullName': 'company',
    })


# --- 主函数 ---
def render_skill_graph(df):
    """Top-level page renderer.

    Builds the sidebar controls (analysis mode, query inputs, Apriori
    parameters, rule-refresh button), then shows either the global skill
    association network or a targeted position/skill analysis.

    Args:
        df: recruitment DataFrame from load_data(); None aborts with an error.
    """
    if df is None:
        st.error("数据加载失败，请检查数据文件")
        return

    with st.sidebar:
        st.subheader("选择分析模式")
        analysis_mode = st.radio(
            "分析模式",
            ["技能关联网络", "岗位↔技能分析"],
            help="选择分析模式"
        )

        targeted = analysis_mode == "岗位↔技能分析"
        if targeted:
            query_type = st.selectbox(
                "查询类型",
                ["岗位→核心技能", "技能→相关岗位", "技能→关联技能"],
                help="选择查询类型"
            )
            # Prompt label depends on which direction the user is querying.
            labels = {
                "岗位→核心技能": "请输入岗位名称",
                "技能→相关岗位": "请输入技能关键词",
                "技能→关联技能": "请输入主技能名称",
            }
            search_key = st.text_input(labels[query_type], placeholder="输入关键词")

        st.subheader("算法参数")
        min_support = st.slider("最小支持度", 0.001, 0.1, 0.005, 0.001)
        min_confidence = st.slider("最小置信度", 0.1, 1.0, 0.5, 0.05)

        # Recompute and persist the association rules on demand.
        if st.button("更新分析结果"):
            try:
                compute_association_rules(df, min_support)
                st.toast("规则更新成功", icon="✅")
            except Exception as e:
                st.error(f"规则更新失败: {e}")

    if targeted:
        show_target_analysis(df, query_type, search_key)
    else:
        tab1, tab2 = st.tabs(["关联网络图谱", "规则明细表"])
        with tab1:
            show_skill_network()
        with tab2:
            show_rules_table(min_confidence)


# --- 核心功能 ---
def show_target_analysis(df, query_type, keyword):
    """Run one targeted query and render its results.

    Depending on `query_type` (one of the three Chinese sidebar labels) this
    shows a frequency table in the left column, a relationship network in the
    wider right column, and finally LLM-generated advice built from the result.

    Args:
        df: recruitment DataFrame (rows are job postings).
        query_type: "岗位→核心技能" | "技能→相关岗位" | "技能→关联技能".
        keyword: user-entered position or skill keyword; empty input aborts.
    """
    if not keyword:
        st.warning("请输入查询关键词")
        return

    try:
        with st.spinner(f"分析 {keyword} 的关联模式..."):
            # Left column: result table; right column (2x width): network chart.
            col1, col2 = st.columns([1, 2])
            with col1:
                st.subheader("分析结果")
                if query_type == "岗位→核心技能":
                    result = get_position_skills(df, keyword)
                    if result:
                        df_result = pd.DataFrame(result.items(), columns=['技能', '出现频次'])
                        st.dataframe(
                            df_result.style.format({'出现频次': '{:.0f}次'})
                            .background_gradient(cmap='Blues'),
                            height=400
                        )
                    else:
                        st.error("未找到相关技能要求")
                        st.write("调试信息：匹配到的岗位数量为0，请检查岗位名称是否正确。")
                elif query_type == "技能→相关岗位":
                    result = get_skill_positions(df, keyword)
                    if result:
                        df_result = pd.DataFrame(result.items(), columns=['岗位', '出现频次'])
                        st.dataframe(
                            df_result.style.format({'出现频次': '{:.0f}次'})
                            .background_gradient(cmap='Blues'),
                            height=400
                        )
                    else:
                        st.error("未找到相关岗位")
                elif query_type == "技能→关联技能":
                    result = get_skill_associations(keyword)
                    if result:
                        df_result = pd.DataFrame(result.items(), columns=['技能', '关联强度'])
                        st.dataframe(
                            df_result.style.format({'关联强度': '{:.2f}'}),
                            height=400
                        )
                    else:
                        st.error("未找到关联技能")

            with col2:
                st.subheader("关联趋势")
                if query_type == "岗位→核心技能":
                    show_position_skill_network(df, keyword)
                elif query_type == "技能→相关岗位":
                    show_skill_position_network(df, keyword)
                elif query_type == "技能→关联技能":
                    show_skill_association_network(df, keyword)

            # Build an LLM prompt from whichever result the query produced.
            # NOTE(review): `result` may be an empty dict here; the prompt is
            # still sent to the model in that case.
            st.subheader("💡 专业建议")
            if query_type == "岗位→核心技能":
                prompt = f"根据以下技能要求数据用3条学习路径建议：{result}"
            elif query_type == "技能→相关岗位":
                prompt = f"根据以下岗位分布数据用3条职业发展建议：{result}"
            elif query_type == "技能→关联技能":
                prompt = f"根据以下技能关联数据用3条学习建议：{result}"
            else:
                prompt = "请根据以下数据提供专业建议："

            # A failed LLM call only degrades this section; the tables and
            # charts above are already rendered.
            try:
                response = client.chat.completions.create(
                    model="glm-4",
                    messages=[{"role": "user", "content": prompt}],
                    temperature=0.5
                )
                st.success(response.choices[0].message.content)
            except Exception as e:
                st.error(f"建议生成失败: {str(e)}")

    except Exception as e:
        st.error(f"分析失败: {str(e)}")


# --- Apriori规则计算 ---
def compute_association_rules(df, min_support):
    """Mine frequent skill itemsets with Apriori and persist them to apriori.bin.

    Normalizes column names and text, runs `apriori` over per-posting skill
    lists, keeps itemsets of size >= 2 with support >= min_support, and
    pickles the resulting (frozenset, support) pairs.

    Args:
        df: recruitment DataFrame; must contain a position column and a skill
            column under one of the accepted header spellings.
        min_support: minimum support threshold for the Apriori run.

    Raises:
        Re-raises any failure after showing it via st.error.
    """
    try:
        # Accept a variety of raw header spellings for the two columns we need.
        column_mapping = {
            '职位名称': 'positionName', '岗位名称': 'positionName', '岗位': 'positionName',
            'Position': 'positionName', 'position': 'positionName',
            'Job Title': 'positionName', 'job_title': 'positionName',
            'skills': 'skill_list', '技能列表': 'skill_list'
        }
        df = df.rename(columns=column_mapping)

        required_columns = ['positionName', 'skill_list']
        missing_cols = [col for col in required_columns if col not in df.columns]
        if missing_cols:
            st.error(f"数据文件缺少必要列：{missing_cols}")
            st.stop()

        # Normalize position names: keep only CJK chars, letters and digits.
        # NOTE(review): this normalized column is never read again below —
        # the mining uses only skill_list; confirm before removing.
        df["positionName"] = (
            df["positionName"]
            .astype(str)
            .str.replace(r"[^\u4e00-\u9fa5a-zA-Z0-9]", "", regex=True)
            .str.strip()
            .str.lower()
        )

        # One cleaned skill list (a "transaction") per posting; NaN rows
        # become empty transactions instead of being dropped.
        processed_skills = []
        for skills in df['skill_list']:
            if pd.isna(skills):
                processed_skills.append([])
                continue
            cleaned = [
                re.sub(r'[^a-zA-Z0-9\u4e00-\u9fa5]', '', s.strip().lower())
                for s in skills.split(',')
                if s.strip()
            ]
            processed_skills.append(cleaned)

        # `apriori` is imported at module level (the old redundant local
        # re-import was removed).
        _, supp_data = apriori(processed_skills, minSupport=min_support)
        rules = [(frozenset(itemset), supp) for itemset, supp in supp_data.items()
                 if supp >= min_support and len(itemset) >= 2]
        with open("apriori.bin", 'wb') as f:
            pickle.dump(rules, f)
        st.toast(f"生成 {len(rules)} 条规则", icon="✅")
    except Exception as e:
        st.error(f"规则计算失败: {str(e)}")
        raise


# --- 可视化 ---
def show_skill_network(focus_skill=None):
    """Render the global skill co-occurrence network from the saved rules.

    Args:
        focus_skill: optional skill name; when given (and present in the
            graph) the view is restricted to that skill and its neighbors,
            with the focus node highlighted in red.
    """
    rules = load_association_rules()
    G = nx.Graph()

    # Each stored rule is (frozenset_of_skills, support); only the first two
    # members of larger itemsets are drawn, with support scaled to edge weight.
    for itemset, supp in rules:
        if len(itemset) >= 2:
            skills = list(itemset)
            G.add_edge(skills[0], skills[1], weight=supp * 100)

    # Bug fix: G.neighbors() raises NetworkXError for an unknown node, so a
    # focus skill that never made it into the rules used to crash the page.
    # Now an absent focus skill simply skips the subgraph filter.
    if focus_skill and focus_skill in G:
        neighbors = list(G.neighbors(focus_skill)) + [focus_skill]
        G = G.subgraph(neighbors)

    pos = nx.spring_layout(G)
    edge_trace = []
    for edge in G.edges():
        x0, y0 = pos[edge[0]]
        x1, y1 = pos[edge[1]]
        trace = go.Scatter(
            x=[x0, x1, None], y=[y0, y1, None],
            line=dict(width=G.edges[edge]['weight'] * 0.1, color='#888'),
            hoverinfo='none',
            mode='lines'
        )
        edge_trace.append(trace)

    node_trace = go.Scatter(
        x=[pos[node][0] for node in G.nodes()],
        y=[pos[node][1] for node in G.nodes()],
        mode='markers+text',
        text=list(G.nodes()),
        marker=dict(
            size=25,
            # Highlight the focused node; everything else gets the default blue.
            color=['red' if node == focus_skill else '#4B9CD3' for node in G.nodes()]
        ),
        textposition="top center"
    )

    fig = go.Figure(edge_trace + [node_trace])
    fig.update_layout(
        showlegend=False,
        margin=dict(l=0, r=0, t=0, b=0),
        height=600
    )
    st.plotly_chart(fig, use_container_width=True)


def load_association_rules():
    """Load the pickled (itemset, support) rules from apriori.bin.

    Returns [] (after an st.error) when the file is missing or unreadable;
    an empty rule list also triggers an advisory error but is returned as-is.
    """
    if not os.path.exists("apriori.bin"):
        st.error("关联规则文件不存在，请先点击「更新分析结果」生成")
        return []
    try:
        with open("apriori.bin", 'rb') as f:
            rules = pickle.load(f)
    except Exception as e:
        st.error(f"文件加载失败: {str(e)}")
        return []
    if not rules:
        st.error("关联规则文件为空，请降低支持度阈值")
    return rules


def show_rules_table(min_confidence):
    """Render the mined itemsets as a sortable skill-pair table.

    NOTE(review): the stored rules only carry support, so the 置信度 column
    currently repeats the support value and `min_confidence` is compared
    against support — computing real confidence would need per-antecedent
    supports from the miner.

    Args:
        min_confidence: threshold from the sidebar slider, applied to the
            stored support value of each itemset.
    """
    max_rules = st.sidebar.slider("最大显示规则数", 10, 100, 50, 10)
    rules = load_association_rules()
    rule_list = []

    for itemset, supp in sorted(rules, key=lambda x: x[1], reverse=True):
        if len(itemset) >= 2 and supp >= min_confidence:
            items = list(itemset)
            # Expand every unordered pair inside the itemset into one row.
            for i in range(len(items)):
                for j in range(i + 1, len(items)):
                    rule_list.append({
                        '技能A': items[i],
                        '技能B': items[j],
                        '支持度': supp,
                        '置信度': supp
                    })
            if len(rule_list) >= max_rules:
                break

    # Bug fix: the pair expansion above can overshoot the cap within one
    # itemset, so the table used to show more rows than the
    # "最大显示规则数" slider promised. Trim to the cap (itemsets were
    # visited in descending-support order, so the strongest rows survive).
    rule_list = rule_list[:max_rules]

    if rule_list:
        df = pd.DataFrame(rule_list)
        st.dataframe(
            df.sort_values('支持度', ascending=False),
            column_config={
                "支持度": st.column_config.NumberColumn(format="%.3f"),
                "置信度": st.column_config.NumberColumn(format="%.3f")
            },
            height=600,
            use_container_width=True
        )
    else:
        st.warning("未找到符合条件的关联规则")


def show_position_skill_network(df, position_name):
    """Draw a star graph linking one position (red) to its top skills (blue)."""
    try:
        skills = get_position_skills(df, position_name)
        if not skills:
            st.warning("无关联技能数据")
            return

        graph = nx.Graph()
        graph.add_node(position_name, type="岗位", color="#FF6B6B")
        for skill, count in skills.items():
            graph.add_node(skill, type="技能", color="#4B9CD3")
            graph.add_edge(position_name, skill, weight=count)

        layout = nx.spring_layout(graph)

        # One line segment per edge; None separates the segment endpoints.
        edge_trace = [
            go.Scatter(
                x=[layout[a][0], layout[b][0], None],
                y=[layout[a][1], layout[b][1], None],
                line=dict(width=2, color="#888"),
                hoverinfo="none",
                mode="lines",
            )
            for a, b in graph.edges()
        ]

        node_trace = go.Scatter(
            x=[layout[n][0] for n in graph.nodes()],
            y=[layout[n][1] for n in graph.nodes()],
            mode="markers+text",
            text=list(graph.nodes()),
            marker=dict(
                size=25,
                color=[graph.nodes[n]["color"] for n in graph.nodes()],
            ),
            textposition="top center",
        )

        fig = go.Figure(edge_trace + [node_trace])
        fig.update_layout(
            showlegend=False,
            margin=dict(l=0, r=0, t=0, b=0),
            height=600,
        )
        st.plotly_chart(fig, use_container_width=True)

    except Exception as e:
        st.error(f"可视化失败: {str(e)}")


def show_skill_position_network(df, skill_name):
    """Draw a star graph linking one skill (blue) to the positions that demand it (red)."""
    try:
        positions = get_skill_positions(df, skill_name)
        if not positions:
            st.warning("无关联岗位数据")
            return

        graph = nx.Graph()
        graph.add_node(skill_name, type="技能", color="#4B9CD3")
        for pos_name, freq in positions.items():
            graph.add_node(pos_name, type="岗位", color="#FF6B6B")
            graph.add_edge(skill_name, pos_name, weight=freq)

        coords = nx.spring_layout(graph)

        def _edge_line(u, v):
            # One hover-less line segment per edge.
            return go.Scatter(
                x=[coords[u][0], coords[v][0], None],
                y=[coords[u][1], coords[v][1], None],
                line=dict(width=2, color="#888"),
                hoverinfo="none",
                mode="lines",
            )

        traces = [_edge_line(u, v) for u, v in graph.edges()]
        traces.append(go.Scatter(
            x=[coords[n][0] for n in graph.nodes()],
            y=[coords[n][1] for n in graph.nodes()],
            mode="markers+text",
            text=list(graph.nodes()),
            marker=dict(
                size=25,
                color=[graph.nodes[n]["color"] for n in graph.nodes()],
            ),
            textposition="top center",
        ))

        fig = go.Figure(traces)
        fig.update_layout(
            showlegend=False,
            margin=dict(l=0, r=0, t=0, b=0),
            height=600,
        )
        st.plotly_chart(fig, use_container_width=True)

    except Exception as e:
        st.error(f"可视化失败: {str(e)}")


def show_skill_association_network(df, skill_name):
    """Visualize the skills most strongly associated with `skill_name`.

    The `df` argument is unused here (associations come from the mined rules
    on disk) but is kept so all three network views share one call shape.
    """
    try:
        related = get_skill_associations(skill_name)
        if not related:
            st.warning("无关联技能数据")
            return

        net = nx.Graph()
        net.add_node(skill_name, type="技能", color="#4B9CD3")
        for other, strength in related.items():
            net.add_node(other, type="关联技能", color="#6B9BFF")
            net.add_edge(skill_name, other, weight=strength)

        xy = nx.spring_layout(net)

        segments = []
        for u, v in net.edges():
            segments.append(go.Scatter(
                x=[xy[u][0], xy[v][0], None],
                y=[xy[u][1], xy[v][1], None],
                # Edge thickness scales with association strength (support).
                line=dict(width=net.edges[u, v]['weight'] * 10, color="#888"),
                hoverinfo="none",
                mode="lines",
            ))

        nodes = go.Scatter(
            x=[xy[n][0] for n in net.nodes()],
            y=[xy[n][1] for n in net.nodes()],
            mode="markers+text",
            text=list(net.nodes()),
            marker=dict(
                size=25,
                color=[net.nodes[n]["color"] for n in net.nodes()],
            ),
            textposition="top center",
        )

        fig = go.Figure(segments + [nodes])
        fig.update_layout(
            showlegend=False,
            margin=dict(l=0, r=0, t=0, b=0),
            height=600,
        )
        st.plotly_chart(fig, use_container_width=True)

    except Exception as e:
        st.error(f"可视化失败: {str(e)}")


def get_position_skills(df, position_name):
    """Count skills across postings whose title matches the query.

    A posting matches when its lowercased title contains any whitespace-
    separated keyword of `position_name`. Skills seen fewer than twice are
    dropped; the five most frequent survivors are returned as {skill: count}.
    """
    try:
        terms = [t.strip().lower() for t in position_name.split() if t.strip()]
        counts = defaultdict(int)
        matched_rows = 0

        for _, row in df.iterrows():
            title = str(row.get("position", "")).lower()
            if not any(term in title for term in terms):
                continue
            matched_rows += 1
            if pd.notna(row["skill_list"]):
                for raw in row["skill_list"].split(","):
                    token = raw.strip()
                    if token:
                        counts[token.lower()] += 1

        if not counts:
            st.error(f"未找到匹配的岗位或技能数据。匹配到 {matched_rows} 行数据，但没有有效的技能信息。")
            return {}

        # Require a minimum frequency of 2, then keep the top five.
        frequent = {skill: n for skill, n in counts.items() if n >= 2}
        return dict(sorted(frequent.items(), key=lambda kv: kv[1], reverse=True)[:5])

    except Exception as e:
        st.error(f"分析失败: {str(e)}")
        return {}


def get_skill_positions(df, skill):
    """Return the top-5 positions (by posting count) that require `skill`.

    Matching is case-insensitive; surrounding whitespace in the query is now
    ignored as well (robustness fix — a trailing space used to yield no
    matches). Rows with a missing skill list are skipped.

    Args:
        df: recruitment DataFrame with 'position' and 'skill_list' columns.
        skill: skill keyword entered by the user.

    Returns:
        {position: count} for at most five positions, most frequent first.
    """
    target_skill = skill.strip().lower()
    position_counter = defaultdict(int)
    for _, row in df.iterrows():
        if pd.isna(row['skill_list']):
            continue
        skills = [s.strip().lower() for s in row['skill_list'].split(',')]
        if target_skill in skills:
            position_counter[row['position']] += 1
    return dict(sorted(position_counter.items(), key=lambda x: x[1], reverse=True)[:5])


def get_skill_associations(skill):
    """Top-5 skills co-occurring with `skill` in the mined itemsets.

    Each partner skill is scored by the strongest support among the itemsets
    it shares with the (lowercased) target skill.
    """
    wanted = skill.lower()
    strength = {}
    for itemset, supp in load_association_rules():
        if wanted not in itemset:
            continue
        for partner in itemset:
            if partner != wanted and supp > strength.get(partner, 0):
                strength[partner] = supp
    return dict(sorted(strength.items(), key=lambda kv: kv[1], reverse=True)[:5])


# --- 运行应用 ---
# Streamlit executes this script top-to-bottom on every rerun.
_data = load_data()
if _data is not None:
    render_skill_graph(_data)