import csv
import os
import json
import numpy as np
import pandas as pd
from datetime import datetime
from django.conf import settings
from django.http import JsonResponse, HttpResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required, user_passes_test
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from django.db import transaction
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.hashers import make_password, check_password
from django.contrib.auth.models import User
from django.contrib import messages
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from openai import OpenAI
from .models import Survey, Question, Option, UserProfile
from .forms import UserRegistrationForm
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
import logging
import re
import jieba
from django.contrib.auth.models import User
from django.shortcuts import redirect, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test

# DeepSeek API client: OpenAI-compatible SDK pointed at a relay base_url;
# the key comes from Django settings.
client = OpenAI(
    base_url='https://tbnx.plus7.plus/v1',
    api_key=settings.DEEPSEEK_API_KEY
)

# Logging setup.
# NOTE(review): basicConfig(DEBUG) at import time configures the root logger
# for the whole process — confirm this is intended in production.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)



# Only superusers/admins may delete accounts, to avoid permission problems.
def is_superuser(user):
    """Predicate for user_passes_test: allow only superuser accounts."""
    return user.is_superuser

@user_passes_test(is_superuser)
def delete_user(request, user_id):
    """Delete a (non-superuser) account; POST only, superuser-gated.

    Superuser accounts are protected and cannot be removed here.
    """
    if request.method == 'POST':
        target = get_object_or_404(User, id=user_id)
        if target.is_superuser:
            messages.error(request, '不能删除超级用户！')
        else:
            target.delete()
            messages.success(request, f'用户 {target.username} 已删除。')
    # Always go back to the user-management page.
    return redirect('admin_user_management')

@login_required
@require_http_methods(["POST"])
def save_survey(request):
    """Create or update a survey with its questions and options, atomically.

    Expects a JSON body: {id?, name, description?, questions: [{id?, text,
    type, order, required?, options?: [{id?, text, order}]}]}. Questions and
    options absent from the payload are deleted. Returns JSON with the
    survey id on success, or {'success': False, 'error': ...} with 400.
    """
    try:
        with transaction.atomic():
            data = json.loads(request.body)
            survey_id = data.get('id')
            if survey_id:
                # Only the owner may update an existing survey.
                survey = Survey.objects.get(id=survey_id, created_by=request.user)
                survey.name = data['name']
                survey.description = data.get('description', '')
                survey.save()
            else:
                survey = Survey.objects.create(
                    name=data['name'],
                    description=data.get('description', ''),
                    created_by=request.user
                )

            existing_questions = set()
            for q_data in data['questions']:
                question_id = q_data.get('id')
                if question_id:
                    question = Question.objects.get(id=question_id, survey=survey)
                    question.text = q_data['text']
                    question.type = q_data['type']
                    question.order = q_data['order']
                    question.required = q_data.get('required', True)
                    question.save()
                else:
                    question = Question.objects.create(
                        survey=survey,
                        text=q_data['text'],
                        type=q_data['type'],
                        order=q_data['order'],
                        required=q_data.get('required', True)
                    )

                existing_questions.add(question.id)

                if question.type in ['single', 'multiple']:
                    existing_options = set()
                    for o_data in q_data.get('options', []):
                        option_id = o_data.get('id')
                        if option_id:
                            option = Option.objects.get(id=option_id, question=question)
                            option.text = o_data['text']
                            option.order = o_data['order']
                            option.save()
                        else:
                            # Bug fix: bind the newly created option so its id
                            # is recorded below. Previously the create() result
                            # was discarded and `option.id` referenced a stale
                            # (or undefined) variable, so freshly created
                            # options were wrongly deleted by the cleanup.
                            option = Option.objects.create(
                                question=question,
                                text=o_data['text'],
                                order=o_data['order']
                            )
                        existing_options.add(option.id)

                    # Remove options that were dropped from the payload.
                    Option.objects.filter(question=question).exclude(id__in=existing_options).delete()

            # Remove questions that were dropped from the payload.
            Question.objects.filter(survey=survey).exclude(id__in=existing_questions).delete()

            return JsonResponse({
                'success': True,
                'survey_id': survey.id,
                'message': '问卷已保存'
            })
    except Exception as e:
        return JsonResponse({'success': False, 'error': str(e)}, status=400)

@login_required
def publish_survey(request, survey_id):
    """Flip an owned survey to 'published' and return its public URL.

    Refuses to publish a survey that has no questions.
    """
    survey = get_object_or_404(Survey, id=survey_id, created_by=request.user)
    if survey.questions.exists():
        survey.status = 'published'
        survey.save()
        return JsonResponse({
            'success': True,
            'url': request.build_absolute_uri(f'/survey/{survey.id}/'),
            'message': '问卷已成功发布'
        })
    return JsonResponse({'success': False, 'error': '发布失败：问卷必须包含至少一个问题'}, status=400)

@login_required
def home(request):
    """Render the landing page listing the CSV files present in DATA_DIR."""
    csv_files = [name for name in os.listdir(settings.DATA_DIR) if name.endswith('.csv')]
    return render(request, 'index.html', {'files': csv_files, 'user': request.user})

@csrf_exempt
def survey_create(request):
    """Create and immediately publish a survey from a JSON payload.

    Payload: {title?, questions: [{text?, type?, options?}]}.
    Returns {'id': ...} with 201 on success, {'error': ...} with 400 on failure.

    NOTE(review): this view is csrf_exempt and has no @login_required, yet it
    assigns request.user as creator — anonymous requests will fail at the DB
    layer; confirm whether authentication should be enforced here.
    """
    if request.method == 'POST':
        try:
            data = json.loads(request.body)
            survey = Survey.objects.create(
                name=data.get('title', '未命名问卷'),
                created_by=request.user,
                status='published'
            )
            for q in data.get('questions', []):
                # Bug fix: read the type once with the same default used at
                # creation time; the old q['type'] raised KeyError when the
                # client omitted the field.
                qtype = q.get('type', 'single')
                question = Question.objects.create(
                    survey=survey,
                    text=q.get('text', ''),
                    type=qtype
                )
                if qtype != 'text':
                    for option in q.get('options', []):
                        Option.objects.create(question=question, text=option)
            return JsonResponse({'id': survey.id}, status=201)
        except Exception as e:
            return JsonResponse({'error': str(e)}, status=400)
    return render(request, 'survey_create.html')


@login_required
def survey_list(request):
    """List surveys — all of them for superusers, otherwise only the
    requester's own — with optional name/creator filters and date ordering
    taken from GET parameters.
    """
    if request.user.is_superuser:
        queryset = Survey.objects.all()
    else:
        queryset = Survey.objects.filter(created_by=request.user)

    # Filter / sort parameters from the query string.
    name_filter = request.GET.get('name', '').strip()
    creator_filter = request.GET.get('creator', '').strip()
    order = request.GET.get('order', 'created_at_desc')

    if name_filter:
        queryset = queryset.filter(name__icontains=name_filter)
    if creator_filter:
        queryset = queryset.filter(created_by__username__icontains=creator_filter)

    # Newest-first unless ascending order is explicitly requested.
    sort_key = 'created_at' if order == 'created_at_asc' else '-created_at'
    queryset = queryset.order_by(sort_key)

    return render(request, 'survey_list.html', {
        'surveys': queryset,
        'name_filter': name_filter,
        'creator_filter': creator_filter,
        'order': order,
    })


@login_required
@require_http_methods(["POST"])
@csrf_exempt
def run_pipeline(request):
    """Run the full analysis pipeline on an uploaded CSV.

    Steps: save the upload with a timestamped name, drop survey metadata
    columns, standardize four feature columns, project to 2D with PCA, pick
    the cluster count k (2..10) by silhouette score, run K-Means, and persist
    labels back into the processed CSV.

    Side effects: writes two files under settings.DATA_DIR and stores the
    cluster centers/labels in the session under keys namespaced by the
    processed filename (read back by the results/analysis views).
    """
    try:
        uploaded_file = request.FILES.get('file')
        if not uploaded_file or not uploaded_file.name.endswith('.csv'):
            return JsonResponse({"status": "error", "message": "仅支持CSV文件"}, status=400)

        # Timestamp the saved name so repeated uploads never collide.
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_filename = f"{timestamp}_{uploaded_file.name}"
        save_path = os.path.join(settings.DATA_DIR, unique_filename)

        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        with open(save_path, 'wb+') as destination:
            for chunk in uploaded_file.chunks():
                destination.write(chunk)

        request.session['pipeline_filename'] = unique_filename
        request.session['pipeline_step'] = 'upload'
        request.session.modified = True

        # Preprocess: drop survey metadata columns and incomplete rows.
        # NOTE(review): the column names are hard-coded for one questionnaire
        # layout — confirm they match the uploaded file's schema.
        file_path = save_path
        df = pd.read_csv(file_path)
        columns_to_drop = ['答题序号', '开始时间', '提交时间', '答题时长', 'IP省份', 'IP城市', 'IP地址', '浏览器', '操作系统', 'Q5|open']
        df.drop(columns=columns_to_drop, inplace=True, errors='ignore')
        df.dropna(inplace=True)
        df.drop(columns=['Q1', '来源'], inplace=True, errors='ignore')

        processed_filename = f"processed_{timestamp}_{uploaded_file.name}"
        processed_file_path = os.path.join(settings.DATA_DIR, processed_filename)
        df.to_csv(processed_file_path, index=False)
        request.session['pipeline_filename'] = processed_filename
        request.session['pipeline_step'] = 'preprocess'
        request.session.modified = True

        # Outlier removal on the selected feature columns (|z| < 3).
        df = pd.read_csv(processed_file_path)
        selected_features = ['Q10', 'Q12', 'Q13', 'Q14']
        df.dropna(subset=selected_features, inplace=True)
        z_scores = np.abs((df[selected_features] - df[selected_features].mean()) / df[selected_features].std())
        df = df[(z_scores < 3).all(axis=1)]

        # Standardize, then project to two principal components.
        scaler = StandardScaler()
        df_normalized = scaler.fit_transform(df[selected_features])

        pca = PCA(n_components=2)
        df_pca = pca.fit_transform(df_normalized)

        # Choose k in [2, 10] by the best silhouette score.
        silhouette_scores = []
        cluster_range = range(2, 11)
        for k in cluster_range:
            kmeans = KMeans(n_clusters=k, random_state=42)
            clusters = kmeans.fit_predict(df_pca)
            silhouette_avg = silhouette_score(df_pca, clusters)
            silhouette_scores.append(silhouette_avg)

        best_k = cluster_range[silhouette_scores.index(max(silhouette_scores))]
        kmeans_final = KMeans(n_clusters=best_k, random_state=42)
        clusters_final = kmeans_final.fit_predict(df_pca)

        # Map the centers back from PCA space to the original feature scale.
        cluster_centers_pca_space = kmeans_final.cluster_centers_
        cluster_centers_original_space = pca.inverse_transform(cluster_centers_pca_space)
        cluster_centers_original_scale = scaler.inverse_transform(cluster_centers_original_space)
        df['Cluster'] = clusters_final
        df.to_csv(processed_file_path, index=False)

        final_silhouette_score = silhouette_score(df_pca, clusters_final) if len(set(clusters_final)) > 1 else None

        # Session keys are namespaced by the processed filename; consumers
        # must build the same keys to read these back.
        request.session[f'cluster_centers_{processed_filename}'] = cluster_centers_original_scale.tolist()
        request.session[f'clusters_final_{processed_filename}'] = clusters_final.tolist()
        request.session['clustering_filename'] = processed_filename
        request.session['pipeline_step'] = 'analysis'
        request.session.modified = True

        # Client-side progress animation steps (durations in ms).
        progress_steps = [
            {"step": "upload", "message": "文件上传成功", "duration": 2000},
            {"step": "preprocess", "message": "数据预处理完成", "duration": 3000},
            {"step": "analysis", "message": "聚类分析完成", "duration": 4000},
            {"step": "results", "message": "分析流程完成", "duration": 2000}
        ]

        return JsonResponse({
            "status": "success",
            "message": "分析流程完成",
            "filename": processed_filename,
            "cluster_centers": cluster_centers_original_scale.tolist(),
            "final_silhouette_score": final_silhouette_score,
            "step": "results",
            "progress_steps": progress_steps
        })
    except Exception as e:
        request.session['pipeline_step'] = 'error'
        request.session.modified = True
        return JsonResponse({"status": "error", "message": str(e)}, status=500)

def user_login(request):
    """Authenticate by username/password and redirect home on success.

    Uses POST.get() so a malformed POST missing either field renders the
    error page instead of raising MultiValueDictKeyError (HTTP 500), which
    request.POST['...'] did previously.
    """
    if request.method == "POST":
        username = request.POST.get('username', '')
        password = request.POST.get('password', '')
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home')
        return render(request, 'login.html', {'error': '用户名或密码错误'})
    return render(request, 'login.html')

@login_required
def user_logout(request):
    """End the current session and send the user to the login page."""
    logout(request)
    return redirect('login')

def admin_required(view_func):
    """Decorator: restrict a view to authenticated superusers."""
    return login_required(user_passes_test(lambda u: u.is_superuser)(view_func))

@login_required
def get_cluster_centers(request):
    """Return the cluster centers and final labels stored by run_pipeline.

    POST only. Responds 400 if no analysis has been run for the session's
    current clustering file.
    """
    if request.method != 'POST':
        return JsonResponse({"error": "无效请求"}, status=400)
    try:
        filename = request.session.get('clustering_filename')
        # Bug fix: the session keys are namespaced by the processed filename
        # (see run_pipeline). Previously a literal placeholder string was
        # used as the key, so these lookups always returned [].
        cluster_centers = request.session.get(f'cluster_centers_{filename}', [])
        clusters_final = request.session.get(f'clusters_final_{filename}', [])
        if not cluster_centers or not clusters_final:
            return JsonResponse({"error": "请先执行聚类分析"}, status=400)
        return JsonResponse({"cluster_centers": cluster_centers, "final_clusters": clusters_final})
    except Exception:
        logger.exception("get_cluster_centers failed")
        return JsonResponse({"error": "服务器内部错误，请联系管理员"}, status=500)

@login_required
def cluster_results(request):
    """Render the clustering results page.

    Loads the processed CSV, relabels question codes and numeric answer
    codes to Chinese text, and builds one Plotly subplot per column:
    per-cluster means for numeric columns, stacked per-cluster counts for
    categorical ones. Cluster centers/labels come from the session keys
    written by run_pipeline.
    """
    try:
        filename = request.GET.get('filename') or request.session.get('clustering_filename')
        if not filename:
            raise ValueError("未找到有效的文件名参数")
        file_path = os.path.join(settings.DATA_DIR, filename)
        if not os.path.exists(file_path):
            # Bug fix: interpolate the real filename (was a literal placeholder).
            raise FileNotFoundError(f"无法找到文件 {filename}")
        # Bug fix: session keys are namespaced by the filename, matching what
        # run_pipeline stores (was a literal placeholder, so lookups failed).
        cluster_centers = request.session.get(f'cluster_centers_{filename}', [])
        clusters_final = request.session.get(f'clusters_final_{filename}', [])
        if not cluster_centers or not clusters_final:
            raise ValueError("未找到聚类分析结果，请先执行分析")

        # Descriptive label per cluster index (dict keyed by cluster id).
        cluster_descriptions_dict = {
            0: '被动知晓型 (用户1)',
            1: '低价值观望型 (用户2)',
            2: '低价值观望型(用户3)',
            3: '冲动型购买者 (用户4)',
            4: '价格敏感者 (用户5)',
            5: '被动知晓型 (用户6)',
            6: '高潜力转化型 (用户7)',
            7: '稳定支持者 (用户8)',
            8: '高认知高参与型 (用户9)'
        }
        num_clusters = len(cluster_centers)

        # Debug output.
        print(f"聚类数量: {num_clusters}")
        print(f"聚类描述字典: {cluster_descriptions_dict}")

        data = pd.read_csv(file_path)
        # Question code -> Chinese column label.
        column_mapping = {
            "Q1": "是否使用抖音", "Q2": "性别", "Q3": "年龄", "Q4": "学历", "Q5": "职业", "Q6": "月收入",
            "Q7": "月均网购次数", "Q8": "每日观看抖音时间", "Q9": "是否关注种草者", "Q10": "对扫地机器人的了解程度",
            "Q11": "是否有购买意愿", "Q12": "是否愿意尝试购买", "Q13": "对扫地机器人的认知", "Q14": "是否愿意参与宣传",
            # Matrix scale questions Q15-Q27.
            "Q15|R1": "种草者是否具备知识", "Q15|R2": "种草者是否受过教育", "Q15|R3": "种草者是否有经验",
            "Q16|R1": "种草者影响力", "Q16|R2": "种草者社会声望", "Q17|R1": "接受推荐", "Q17|R2": "互动交流",
            "Q17|R3": "关注动态", "Q18|R1": "个性相似", "Q18|R2": "兴趣相似", "Q18|R3": "选择相似",
            "Q19|R1": "推荐信息有参考价值", "Q19|R2": "全面介绍产品", "Q19|R3": "详细讲解特点",
            "Q20|R1": "图文结合", "Q20|R2": "视频讲解", "Q20|R3": "现场演示", "Q21|R1": "推荐他人使用",
            "Q21|R2": "推荐有效提升购买意愿", "Q22|R1": "内容生动有趣", "Q22|R2": "观看视频令人放松",
            "Q23|R1": "因直播特价购买", "Q23|R2": "因限量抢购购买", "Q23|R3": "因大额优惠券购买",
            "Q24|R1": "种草者亲身体验", "Q24|R2": "推荐无偏见", "Q24|R3": "推荐较可靠",
            "Q25|R1": "物有所值", "Q25|R2": "能买到想要的产品", "Q25|R3": "正确决策",
            "Q26|R1": "观看时感到快乐", "Q26|R2": "观看时满怀期待", "Q26|R3": "观看时感到满意",
            "Q27|R1": "愿意购买", "Q27|R2": "愿意推荐他人"
        }
        # Per-question numeric answer code -> answer text.
        option_mapping = {
            "Q1": {1: "是", 2: "否"}, "Q2": {1: "男", 2: "女"}, "Q3": {1: "20岁以下", 2: "21-30岁", 3: "31-40岁", 4: "41-50岁", 5: "51岁以上"},
            "Q4": {1: "初中以下", 2: "高中/职校", 3: "大学专科", 4: "大学本科", 5: "硕士", 6: "博士及以上"},
            "Q5": {1: "学生", 2: "公务员", 3: "企业管理者", 4: "职员", 5: "专业人员", 6: "普通工人", 7: "商业职工", 8: "个体经营者", 9: "自由职业者", 10: "农林牧渔", 11: "退休", 12: "无业", 13: "其他"},
            "Q6": {1: "1500元以下", 2: "1500-2000元", 3: "2000-3000元", 4: "3000-5000元", 5: "5000元以上"},
            "Q7": {1: "0次", 2: "1-2次", 3: "3-5次", 4: "6-9次", 5: "10次以上"},
            "Q8": {1: "15分钟以下", 2: "15-30分钟", 3: "30-60分钟", 4: "60分钟以上"},
            "Q9": {1: "有过", 2: "没有", 3: "正在了解"}, "Q10": {1: "十分了解", 2: "基本了解", 3: "一知半解", 4: "基本不了解", 5: "没听说过"},
            "Q11": {1: "有过", 2: "没有", 3: "正在了解"}, "Q12": {1: "愿意", 2: "可以尝试", 3: "看别人使用情况", 4: "不愿意"},
            "Q13": {1: "方便利民，意义重大", 2: "大费周章，毫无用途", 3: "没有作用", 4: "不了解"},
            "Q14": {1: "非常愿意", 2: "可以尝试", 3: "看别人去不去", 4: "不愿意"},
            "Q15|R1": {1: "赞同", 2: "不赞同", 3: "中立"}, "Q15|R2": {1: "赞同", 2: "不赞同", 3: "中立"}, "Q15|R3": {1: "赞同", 2: "不赞同", 3: "中立"},
            "Q16|R1": {1: "赞同", 2: "不赞同", 3: "中立"}, "Q16|R2": {1: "赞同", 2: "不赞同", 3: "中立"},
            "Q17|R1": {1: "是", 2: "否"}, "Q17|R2": {1: "是", 2: "否"}, "Q17|R3": {1: "是", 2: "否"},
            "Q18|R1": {1: "是", 2: "否"}, "Q18|R2": {1: "是", 2: "否"}, "Q18|R3": {1: "是", 2: "否"},
            "Q19|R1": {1: "是", 2: "否"}, "Q19|R2": {1: "是", 2: "否"}, "Q19|R3": {1: "是", 2: "否"},
            "Q20|R1": {1: "是", 2: "否"}, "Q20|R2": {1: "是", 2: "否"}, "Q20|R3": {1: "是", 2: "否"},
            "Q21|R1": {1: "是", 2: "否"}, "Q21|R2": {1: "是", 2: "否"}, "Q22|R1": {1: "是", 2: "否"}, "Q22|R2": {1: "是", 2: "否"},
            "Q23|R1": {1: "是", 2: "否"}, "Q23|R2": {1: "是", 2: "否"}, "Q23|R3": {1: "是", 2: "否"},
            "Q24|R1": {1: "是", 2: "否"}, "Q24|R2": {1: "是", 2: "否"}, "Q24|R3": {1: "是", 2: "否"},
            "Q25|R1": {1: "是", 2: "否"}, "Q25|R2": {1: "是", 2: "否"}, "Q25|R3": {1: "是", 2: "否"},
            "Q26|R1": {1: "是", 2: "否"}, "Q26|R2": {1: "是", 2: "否"}, "Q26|R3": {1: "是", 2: "否"},
            "Q27|R1": {1: "是", 2: "否"}, "Q27|R2": {1: "是", 2: "否"}
        }
        data.rename(columns=column_mapping, inplace=True)
        for col, mapping in option_mapping.items():
            col_ch = column_mapping.get(col, col)
            if col_ch in data.columns:
                data[col_ch] = data[col_ch].map(mapping)

        # Split numeric vs. categorical columns, excluding the Cluster label.
        numeric_columns = data.select_dtypes(include=['number']).columns.drop('Cluster', errors='ignore')
        non_numeric_columns = data.select_dtypes(exclude=['number']).drop(columns=['Cluster'], errors='ignore').columns

        total_plots = len(numeric_columns) + len(non_numeric_columns)
        cols = 2 if total_plots > 2 else total_plots
        rows = (total_plots + cols - 1) // cols

        fig = make_subplots(
            rows=rows, cols=cols,
            subplot_titles=[f' {col} 分析' for col in numeric_columns] +
                           [f' {col} 分析' for col in non_numeric_columns]
        )

        row, col = 1, 1
        # Numeric columns: per-cluster mean as a bar chart.
        for col_name in numeric_columns:
            grouped = data.groupby('Cluster')[col_name].mean().reset_index()
            bar_trace = go.Bar(x=grouped['Cluster'], y=grouped[col_name], name=col_name)
            fig.add_trace(bar_trace, row=row, col=col)
            fig.update_xaxes(title_text="聚类标签", row=row, col=col)
            fig.update_yaxes(title_text=col_name, row=row, col=col)

            col = col + 1 if col < cols else 1
            row += 1 if col == 1 else 0

        # Categorical columns: stacked per-cluster counts with percent hover.
        for col_name in non_numeric_columns:
            hist_data = data.groupby([col_name, 'Cluster']).size().reset_index(name='Count')

            total_per_cluster = hist_data.groupby('Cluster')['Count'].sum().reset_index(name='Total')
            hist_data = pd.merge(hist_data, total_per_cluster, on='Cluster')
            hist_data['Percent'] = hist_data['Count'] / hist_data['Total'] * 100

            for cluster in sorted(hist_data['Cluster'].unique()):
                cluster_data = hist_data[hist_data['Cluster'] == cluster]
                hist_trace = go.Bar(
                    x=cluster_data[col_name],
                    y=cluster_data['Count'],
                    name=f'用户类别 {cluster+1}',
                    customdata=np.stack([cluster_data['Percent'], cluster_data['Cluster']+1], axis=-1),
                    hovertemplate=(
                            '类别: %{x}<br>' +
                            '数量: %{y}<br>' +
                            '占比: %{customdata[0]:.1f}%<br>' +
                            '类型: 用户类别 %{customdata[1]:.0f}<extra></extra>'
                    ),
                    showlegend=(row == 1 and col == 1)  # show the legend on the first subplot only
                )
                fig.add_trace(hist_trace, row=row, col=col)

            fig.update_xaxes(title_text=col_name, row=row, col=col)
            fig.update_yaxes(title_text="计数占比", row=row, col=col)

            col = col + 1 if col < cols else 1
            row += 1 if col == 1 else 0

        fig.update_layout(
            height=300 * rows,
            width=None,  # let the chart width adapt to the page
            autosize=True,
            barmode='stack'
        )

        chart_html = pio.to_html(
            fig,
            full_html=False,
            include_plotlyjs='cdn',  # load plotly.js from CDN to keep the page small
            config={'responsive': True}  # enable responsive resizing
        )

        return render(request, 'cluster_results.html', {
            'chart_html': chart_html,
            'cluster_centers': cluster_centers,
            'cluster_descriptions': cluster_descriptions_dict,  # dict: cluster index -> label
            'num_clusters': num_clusters,
            'filename': filename
        })

    except (FileNotFoundError, ValueError) as e:
        return render(request, 'error.html', {'error': str(e)})
    except Exception as e:
        return render(request, 'error.html', {'error': f"系统错误：{str(e)}"})

def register(request):
    """Self-service registration.

    New accounts are flagged as staff, and a UserProfile holding the
    security question plus a hashed security answer is created alongside.
    """
    if request.method != 'POST':
        return render(request, 'register.html', {'form': UserRegistrationForm()})

    user_form = UserRegistrationForm(request.POST)
    if not user_form.is_valid():
        # Re-render with the bound form so field errors are shown.
        return render(request, 'register.html', {'form': user_form})

    new_user = user_form.save(commit=False)
    new_user.set_password(user_form.cleaned_data['password'])
    new_user.is_staff = True
    new_user.save()
    UserProfile.objects.create(
        user=new_user,
        security_question=user_form.cleaned_data['security_question'],
        # Hash the answer so it is never stored in plain text.
        security_answer=make_password(user_form.cleaned_data['security_answer']),
    )
    return render(request, 'register_done.html', {'new_user': new_user})

@login_required
def index(request):
    """Show every published survey on the index page."""
    published = Survey.objects.filter(status='published')
    return render(request, 'index.html', {'surveys': published})

@login_required
def view_survey(request, survey_id):
    """Display a survey read-only; only the owner or a superuser may view it."""
    survey = get_object_or_404(Survey, id=survey_id)
    allowed = request.user.is_superuser or survey.created_by == request.user
    if not allowed:
        return redirect('survey_list')
    questions = []
    for q in survey.questions.all():
        questions.append({'id': q.id, 'type': q.type, 'text': q.text, 'options': list(q.options.all())})
    return render(request, 'view_survey.html', {'survey': survey, 'questions': questions})

@login_required
@csrf_exempt
def delete_survey(request, survey_id):
    """Delete a survey via POST, then return to the survey list.

    Bug fix: only the survey's owner or a superuser may delete it —
    previously any logged-in user could delete any survey, inconsistent
    with the ownership checks in view_survey/export_survey.
    """
    if request.method == 'POST':
        survey = get_object_or_404(Survey, id=survey_id)
        if request.user.is_superuser or survey.created_by == request.user:
            survey.delete()
    return redirect('survey_list')

@login_required
def export_survey(request, survey_id):
    """Download a survey as a CSV attachment (one row per question:
    type, text, comma-joined options); owner or superuser only.
    """
    survey = get_object_or_404(Survey, id=survey_id)
    if not (request.user.is_superuser or survey.created_by == request.user):
        return redirect('survey_list')
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = f'attachment; filename="survey_{survey.id}.csv"'
    writer = csv.writer(response)
    writer.writerow(['问题类型', '问题内容', '选项'])
    for question in survey.questions.all():
        option_text = ', '.join(opt.text for opt in question.options.all())
        writer.writerow([question.type, question.text, option_text])
    return response

@login_required
@require_http_methods(["POST"])
@csrf_exempt
def analyze_user_group(request):
    """Classify a free-text user comment against the stored cluster centers
    via the DeepSeek API.

    Expects JSON {"text": ..., "lang"?: ...}; text must be 20-1000 chars.
    """
    try:
        data = json.loads(request.body)
        user_input = data.get("text", "")
        lang = data.get("lang", "zh-CN")
        if not user_input or len(user_input) < 20 or len(user_input) > 1000:
            return JsonResponse({"status": "error", "message": "输入长度需在20-1000字符之间"}, status=400)
        filename = request.session.get('clustering_filename')
        # Bug fix: the session key is namespaced by the processed filename
        # (see run_pipeline). Previously a literal placeholder string was
        # used as the key, so the lookup always returned [].
        cluster_centers = request.session.get(f'cluster_centers_{filename}', [])
        if not cluster_centers:
            return JsonResponse({"status": "error", "message": "未找到聚类中心数据"}, status=400)
        analysis_result = ai_enhanced_analysis(user_input, cluster_centers, lang)
        return JsonResponse({"status": "success", "data": analysis_result})
    except Exception as e:
        return JsonResponse({"status": "error", "message": str(e)}, status=500)

def clean_text(text):
    """Normalize raw text: strip non-word characters (keeping CJK), segment
    Chinese text with jieba or lowercase non-Chinese text, then collapse
    runs of whitespace to single spaces.
    """
    stripped = re.sub(r'[^\w\u4e00-\u9fff]', ' ', text)
    if re.search('[\u4e00-\u9fff]', stripped):
        normalized = ' '.join(jieba.cut(stripped))
    else:
        normalized = stripped.lower()
    return re.sub(r'\s+', ' ', normalized).strip()

def admin_required(view_func):
    """Decorate a view so only authenticated superusers may access it."""
    # NOTE(review): exact duplicate of admin_required defined earlier in this
    # module; this later definition is the one in effect at import time —
    # consider removing one of them.
    decorated_view_func = login_required(user_passes_test(lambda u: u.is_superuser)(view_func))
    return decorated_view_func

@admin_required
def admin_user_management(request):
    """Admin dashboard listing every account (newest first) with its
    survey count.

    NOTE(review): the per-user Survey count issues one query per user (N+1).
    """
    rows = []
    for account in User.objects.all().order_by('-date_joined'):
        rows.append({
            'username': account.username,
            'id': account.id,
            'email': account.email,
            'date_joined': account.date_joined,
            'is_superuser': account.is_superuser,
            'is_staff': account.is_staff,
            'survey_count': Survey.objects.filter(created_by=account).count(),
        })
    return render(request, 'admin_user_management.html', {'users': rows})

def password_reset_request(request):
    """Password reset, step 1: look up the account and present its
    security question for verification.
    """
    if request.method != 'POST':
        return render(request, 'password_reset_request.html')
    username = request.POST.get('username')
    try:
        account = User.objects.get(username=username)
        profile = UserProfile.objects.get(user=account)
    except (User.DoesNotExist, UserProfile.DoesNotExist):
        return render(request, 'password_reset_request.html', {'error': '用户不存在或未设置安全问题'})
    return render(request, 'password_reset_verify.html', {
        'username': username,
        'security_question': profile.security_question,
    })

def password_reset_verify(request):
    """Password reset, step 2: verify the security answer and, if the two
    new passwords match, set the new password and redirect to login.
    """
    if request.method != 'POST':
        return redirect('password_reset_request')

    username = request.POST.get('username')
    answer = request.POST.get('security_answer')
    new_password = request.POST.get('new_password')
    new_password2 = request.POST.get('new_password2')

    if not all([username, answer, new_password, new_password2]):
        messages.error(request, '请填写所有字段')
        return render(request, 'password_reset_verify.html', {
            'username': username,
            'security_question': request.POST.get('security_question', '未获取到问题'),
        })

    try:
        account = User.objects.get(username=username)
        profile = UserProfile.objects.get(user=account)
    except (User.DoesNotExist, UserProfile.DoesNotExist):
        messages.error(request, '用户不存在或未设置安全问题')
        return redirect('password_reset_request')

    retry_context = {'username': username, 'security_question': profile.security_question}
    if not check_password(answer, profile.security_answer):
        messages.error(request, '安全答案错误')
        return render(request, 'password_reset_verify.html', retry_context)
    if new_password != new_password2:
        messages.error(request, '两次输入的新密码不一致')
        return render(request, 'password_reset_verify.html', retry_context)

    account.set_password(new_password)
    account.save()
    messages.success(request, '密码重置成功！请使用新密码登录。')
    return redirect('login')

def ai_enhanced_analysis(user_input, cluster_centers, lang="zh-CN"):
    """Classify a free-text comment against the cluster centers using the
    DeepSeek chat API and return the parsed result dict.

    Raises a generic Exception wrapping any API failure.
    """
    num_clusters = len(cluster_centers)
    prompt = build_ai_prompt(user_input, cluster_centers, num_clusters, lang)
    try:
        completion = client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {"role": "system", "content": "You are a helpful data analysis expert."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=500,
            temperature=0.7,
            stream=False,
        )
        return parse_ai_response(completion.choices[0].message.content.strip(), num_clusters)
    except Exception as e:
        logger.error(f"DeepSeek API error in ai_enhanced_analysis: {str(e)}")
        raise Exception(f"API 调用失败: {str(e)}")

def build_ai_prompt(user_input, cluster_centers, num_clusters, lang="zh-CN"):
    """Assemble the classification prompt sent to DeepSeek.

    Each center is summarized as high/low (高/低) on the four feature
    dimensions in order: awareness, purchase intent, cognition, promotion
    willingness.
    """
    descriptions = []
    for i, center in enumerate(cluster_centers):
        descriptions.append(
            f"Cluster {i + 1}: 用户群体倾向于{'高' if center[0] > 0 else '低'}了解程度，"
            f"{'高' if center[1] > 0 else '低'}购买意愿，"
            f"{'高' if center[2] > 0 else '低'}认识程度，"
            f"{'高' if center[3] > 0 else '低'}宣传意愿。"
        )
    joined_descriptions = "\n".join(descriptions)
    prompt = f"""
    你是一个数据分析专家，基于以下聚类中心描述和用户输入，分析用户评论属于哪个聚类类别，并提供置信度和关键特征描述。语言使用{lang}。
    聚类中心描述：
    {joined_descriptions}
    用户输入：{user_input}
    我设置的用户相对应的特征是这个 '被动知晓型 (用户1)','低价值观望型 (用户2)''低价值观望型(用户3)',
            '冲动型购买者 (用户4)', '价格敏感者 (用户5)', '被动知晓型 (用户6)','高潜力转化型 (用户7)',
             '稳定支持者 (用户8)', '高认知高参与型 (用户9)'
        返回时候根据用户特征 来转换为用户类别 确定cluster_id
    cluster_id请返回对应的用户类别号，从1开始到9结束。cluster_name描述性名称使用格式为：x了解程度,y购买意愿,z认识程度,o宣传意愿。字母代表是高中低。
    只返回 JSON，不加解释性文字。不添加``` 。请返回以下格式的 JSON：
    {{"cluster_id": 0, "cluster_name": "描述性名称", "confidence": 0.0到1.0之间的置信度, "color": "#HEX颜色代码", "icon": "font-awesome图标类名（如fa-user）", "key_features": ["特征1", "特征2", ...]}}
    """
    return prompt


def parse_ai_response(ai_response, num_clusters):
    """Parse the model's JSON classification into a validated result dict.

    Three layers of fallback: (1) strict json.loads with field validation;
    (2) on JSON failure, regex extraction of the individual fields;
    (3) on any other failure, a default "unknown" result. Never raises —
    every path returns a dict with cluster_id, cluster_name, confidence,
    color, icon and key_features.
    """
    try:
        # Trim surrounding whitespace/newlines before parsing.
        cleaned_response = ai_response.strip()
        logger.debug(f"Cleaned AI response for parsing: {cleaned_response}")
        data = json.loads(cleaned_response)

        # Extract and validate each field, falling back to safe defaults.
        cluster_id = int(data.get("cluster_id", 0))
        if not (0 <= cluster_id < num_clusters):
            logger.warning(f"Invalid cluster_id {cluster_id}, defaulting to 0")
            cluster_id = 0

        confidence = float(data.get("confidence", 0.5))
        if not (0 <= confidence <= 1):
            logger.warning(f"Invalid confidence {confidence}, defaulting to 0.5")
            confidence = 0.5

        key_features = data.get("key_features", ["未提供特征"])
        if not isinstance(key_features, list):
            logger.warning(f"Invalid key_features format: {key_features}, defaulting")
            key_features = ["特征格式错误"]

        return {
            "cluster_id": cluster_id,
            "cluster_name": data.get("cluster_name", f"Cluster {cluster_id + 1}"),
            "confidence": confidence,
            "color": data.get("color", "#6366f1"),
            "icon": data.get("icon", "fa-user"),
            "key_features": key_features
        }
    except json.JSONDecodeError as e:
        logger.error(f"JSON decode error: {str(e)}, raw response: {ai_response}")
        # Fallback: pull individual fields out with regexes when the model
        # returned almost-JSON (extra prose, code fences, etc.).
        try:
            cluster_id_match = re.search(r'"cluster_id"\s*:\s*(\d+)', ai_response)
            cluster_name_match = re.search(r'"cluster_name"\s*:\s*"([^"]+)"', ai_response)
            confidence_match = re.search(r'"confidence"\s*:\s*([\d.]+)', ai_response)
            key_features_match = re.search(r'"key_features"\s*:\s*\[\s*(.*?)\s*\]', ai_response, re.DOTALL)

            cluster_id = int(cluster_id_match.group(1)) if cluster_id_match else 0
            if not (0 <= cluster_id < num_clusters):
                cluster_id = 0
            cluster_name = cluster_name_match.group(1) if cluster_name_match else f"Cluster {cluster_id + 1}"
            confidence = float(confidence_match.group(1)) if confidence_match else 0.5
            if not (0 <= confidence <= 1):
                confidence = 0.5
            key_features = ["提取失败"]
            if key_features_match:
                features_str = key_features_match.group(1)
                key_features = [f.strip().strip('"') for f in features_str.split(",") if f.strip()]

            return {
                "cluster_id": cluster_id,
                "cluster_name": cluster_name,
                "confidence": confidence,
                "color": "#6366f1",
                "icon": "fa-user",
                "key_features": key_features if key_features else ["解析失败"]
            }
        except Exception as e:
            logger.error(f"Regex parsing failed: {str(e)}, raw response: {ai_response}")
            return {
                "cluster_id": 0,
                "cluster_name": "未知类别",
                "confidence": 0.5,
                "color": "#6366f1",
                "icon": "fa-user",
                "key_features": [f"正则解析错误: {str(e)}"]
            }
    except Exception as e:
        logger.error(f"Unexpected error parsing AI response: {str(e)}, raw response: {ai_response}")
        return {
            "cluster_id": 0,
            "cluster_name": "未知类别",
            "confidence": 0.5,
            "color": "#6366f1",
            "icon": "fa-user",
            "key_features": [f"解析错误: {str(e)}"]
        }
@login_required
@require_http_methods(["POST"])
@csrf_exempt
def generate_ai_report(request):
    """Generate a Markdown analysis report for the supplied cluster centers
    via the DeepSeek chat API.

    Expects JSON {"clusters": [...], "metadata": {"filename"?, "dimensions"?}}.
    """
    try:
        payload = json.loads(request.body)
        clusters = payload.get("clusters", [])
        metadata = payload.get("metadata", {})
        filename = metadata.get("filename", "unknown")
        dimensions = metadata.get("dimensions", 4)
        num_clusters = len(clusters)
        if not clusters:
            return JsonResponse({"status": "error", "message": "未提供聚类数据"}, status=400)
        prompt = f"""
        你是一个数据分析专家，基于以下{num_clusters}个用户中心生成一个详细的分析报告。语言使用zh-CN。
        用户特征数据（每个中心包含{dimensions}个维度：了解程度、购买意愿、认识程度、宣传意愿）：
        {json.dumps(clusters, indent=2)}
        请生成一个结构化的报告，包含以下内容：
        - 概述归类：简要描述每个用户类别的总体特点。
        - 类别分析：为每个用户类别提供描述性名称和特征分析。
        - 建议：基于分析结果提出业务建议。
        使用 Markdown 格式展示出，注意格式，每个用户分析结果另起一行。
        """
        try:
            completion = client.chat.completions.create(
                model="deepseek-chat",
                messages=[
                    {"role": "system", "content": "You are a helpful data analysis expert."},
                    {"role": "user", "content": prompt},
                ],
                max_tokens=3000,
                temperature=0.6,
                stream=False,
            )
            report = completion.choices[0].message.content.strip()
            return JsonResponse({"status": "success", "report": report})
        except Exception as e:
            # Re-raise so the outer handler returns a uniform 500 payload.
            logger.error(f"DeepSeek API error in report generation: {str(e)}")
            raise Exception(f"报告生成失败: {str(e)}")
    except Exception as e:
        logger.error(f"Generate report error: {str(e)}")
        return JsonResponse({"status": "error", "message": str(e)}, status=500)