from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render, redirect
from apps.users.models import UserSurveyInfo
from django.views import View
from django import http
from utils.response_code import RETCODE
from django_redis import get_redis_connection
from apps.contents.constants import SCRAPYCODE, err_msg, SCRAPYDATA_AGE, CVANALYZE_AGE
from apps.contents.utils import SCRAPY_DIR, cv_analyze, web_action_analyze,analyze_questionnaire_results


class IndexView(View):
    """Serve the site landing page."""

    def get(self, request):
        """Handle GET by rendering the index template.

        :param request: incoming HTTP request
        :return: rendered home page response
        """
        template = 'index.html'
        return render(request, template)


class CareerView(View):
    """Serve the career-portrait page."""

    def get(self, request):
        """Handle GET by rendering the career-portrait template.

        :param request: incoming HTTP request
        :return: rendered career-portrait page response
        """
        template = 'home.html'
        return render(request, template)

class QuestionnaireView(View):
    """Serve the questionnaire page."""

    def get(self, request):
        """Handle GET by rendering the questionnaire template.

        :param request: incoming HTTP request
        :return: rendered questionnaire page response
        """
        template = 'questionnaire.html'
        return render(request, template)

class JobRecommendView(View):
    """Serve the job-recommendation page."""

    def get(self, request):
        """Handle GET by rendering the job-recommendation template.

        :param request: incoming HTTP request
        :return: rendered job-recommendation page response
        """
        template = 'job_recommend.html'
        return render(request, template)


# NOTE(review): this definition is dead code — a second `PerfectInfoView`
# (LoginRequiredMixin version) is declared later in this module and shadows
# this one at import time. Consider deleting this class.
class PerfectInfoView(View):
    def get(self, request):
        '''
        Provide the perfect-info page.
        :param request:
        :return: perfect-info page
        '''
        return render(request, 'perfect_info.html')

class TempView(View):
    """Serve the temporary test page."""

    def get(self, request):
        """Handle GET by rendering the temporary test template.

        :param request: incoming HTTP request
        :return: rendered temporary test page response
        """
        template = 'temp.html'
        return render(request, template)

class ScrapyView(LoginRequiredMixin, View):
    """Dispatch a scrape request to the matching crawler in SCRAPY_DIR."""
    login_url = '/users/login/'

    def get(self, request):
        '''
        Scraper dispatch: look up the crawler registered for ``scrapyweb``
        in SCRAPY_DIR and run it for the logged-in user.
        :param request: GET params: ``scrapyweb`` (site key), ``key1`` (search keyword)
        :return: the JSON response produced by the selected crawler
        '''
        # Target site key and the search keyword sent by the frontend.
        scrapyweb = request.GET.get('scrapyweb')
        keyword = request.GET.get('key1')
        username = request.user.username

        # Guard against a missing/unknown site key: the original indexed
        # SCRAPY_DIR directly, which raised KeyError (HTTP 500) for any
        # value not registered in the dispatch table.
        scraper = SCRAPY_DIR.get(scrapyweb)
        if scraper is None:
            return http.HttpResponseForbidden('缺少必传参数')
        return scraper(keyword, username)


#class CvAnalyzeView(LoginRequiredMixin, View):
 #   login_url = '/users/login/'

  #  def post(self, request):

   #     file = request.FILES.get('pdffile')
    #    if file is None:
       #     return http.HttpResponseForbidden('缺少必传参数')
     #   username = request.user.username
      #  cv_result = cv_analyze(file)
        # redis_conn = get_redis_connection('analyze_data')
        # redis_conn.setex('cv_%s' % username, CVANALYZE_AGE, cv_result)
        # return http.JsonResponse({'code': RETCODE.OK, 'errmsg': '成功', 'data': cv_result})


       # result_dict = web_action_analyze(username)
       # result_dict['cv'] = cv_result
       # print("*" * 30)
       # print(result_dict)
        # redis_conn = get_redis_connection('analyze_data')
        # result_dict['cv'] = redis_conn.get('cv_%s' % username)

        # if result_dict['cv'] is None:
        #     result_dict['cv'] = -1
        # else:
        #     result_dict['cv'] = result_dict['cv'].decode()
       # return render(request, 'paper.html', result_dict)

from apps.users.models import UserInfo
class CvAnalyzeView(LoginRequiredMixin, View):
    """Combine CV, web-action and questionnaire analyses into one report."""
    login_url = '/users/login/'

    # Session keys for the individual questionnaire results; cleared once
    # the combined report has been produced (in case clear_results was not
    # called elsewhere).
    _QUESTIONNAIRE_KEYS = ('q1', 'q2', 'q3', 'q4', 'q5')

    def post(self, request):
        '''
        Analyze the uploaded CV, merge in web-action and questionnaire
        analyses, persist the combined result, and render the report page.
        :param request: POST request with optional file field ``pdffile``
        :return: rendered paper.html with the combined result dict
        '''
        file = request.FILES.get('pdffile')
        username = request.user.username
        user = request.user

        # 1. CV analysis (tolerates a missing file or analyzer failure).
        cv_result = self._analyze_cv(file)

        # 2. Web-behaviour analysis; fall back to a base dict on failure so
        #    the report can still render.
        try:
            result_dict = web_action_analyze(username)
        except Exception as e:
            print(f"Error during web action analysis: {e}")
            result_dict = {'error': "网络行为分析失败，请稍后重试。"}

        # 3. Questionnaire analysis text (AI output) and the raw answers
        #    (used by the template for charting).
        questionnaire_analysis = self._get_questionnaire_analysis(request, username, user)
        questionnaire_raw_results = request.session.get('questionnaire_raw_results', {})

        # 3.2 The user's profile-completion data, if any.
        user_survey_data = self._get_user_survey_data(user)

        # 4. Merge everything into the template context.
        result_dict['cv'] = cv_result
        result_dict['questionnaire'] = questionnaire_analysis
        result_dict['questionnaire_raw'] = questionnaire_raw_results
        result_dict['user_survey_data'] = user_survey_data

        # 5. Persist the combined report (best effort).
        try:
            userinfo, _ = UserInfo.objects.get_or_create(user=user)
            userinfo.result = f"{result_dict}"
            userinfo.save()
        except Exception as e:
            print(f"Error saving analysis results to UserInfo: {e}")

        # 6. Drop questionnaire data from the session now it is reported.
        self._clear_session_results(request)

        # 7. Render the final report page.
        return render(request, 'paper.html', result_dict)

    def _analyze_cv(self, file):
        """Run CV analysis; return a user-facing message when it cannot run."""
        if file is None:
            return "未上传简历"
        try:
            return cv_analyze(file)
        except Exception as e:
            print(f"Error during CV analysis: {e}")
            return "简历分析失败，请检查文件或稍后重试。"

    def _get_questionnaire_analysis(self, request, username, user):
        """Return the questionnaire analysis from the session, or rebuild it."""
        questionnaire_analysis = request.session.get('questionnaire_analysis')
        if questionnaire_analysis:
            return questionnaire_analysis
        try:
            raw_results = request.session.get('questionnaire_raw_results', {})
            if raw_results:
                # Re-run the analysis so it includes the user's profile info.
                return analyze_questionnaire_results(username, raw_results)
            # No questionnaire data: fall back to a summary of profile info.
            # Narrowed from a bare except: only "no record" is expected here.
            try:
                survey_info = UserSurveyInfo.objects.get(user=user)
                return f"用户基本信息分析：\n\n用户来自{survey_info.place}，年龄段为{survey_info.age_group}，期望在{survey_info.want_city}发展，从事{survey_info.want_job}工作，有{survey_info.work_experience}年实习经验。\n\n建议完成问卷测试获取更详细的分析。"
            except UserSurveyInfo.DoesNotExist:
                return '问卷分析结果未找到或未完成问卷。请先完成问卷测试或填写个人信息。'
        except Exception as e:
            print(f"重新分析问卷结果失败: {e}")
            return '问卷分析结果未找到或未完成问卷。'

    def _get_user_survey_data(self, user):
        """Return the user's profile-completion data as a dict ({} if absent)."""
        try:
            survey_info = UserSurveyInfo.objects.get(user=user)
        except UserSurveyInfo.DoesNotExist:
            return {}
        return {
            'place': survey_info.place,
            'age_group': survey_info.age_group,
            'want_city': survey_info.want_city,
            'want_job': survey_info.want_job,
            'work_experience': survey_info.work_experience,
            'real_name': survey_info.real_name,
            'phone': survey_info.phone,
        }

    def _clear_session_results(self, request):
        """Remove questionnaire results (combined and per-test) from session."""
        request.session.pop('questionnaire_analysis', None)
        request.session.pop('questionnaire_raw_results', None)
        for q_type in self._QUESTIONNAIRE_KEYS:
            request.session.pop(f'test_result_{q_type}', None)

# NOTE(review): login_url was set but LoginRequiredMixin was missing, so the
# redirect never applied and anonymous users reached the ORM code; the mixin
# is now inherited so login_url takes effect.
class ActionAnalyzeView(LoginRequiredMixin, View):
    """Render the report page from web-action analysis plus the cached CV result."""
    login_url = '/users/login/'

    def post(self, request):
        '''
        Build the analysis report for the logged-in user.
        :param request: POST request
        :return: rendered paper.html with the analysis dict
        '''
        username = request.user.username
        result_dict = web_action_analyze(username)
        # Pull the CV analysis cached by a previous upload from Redis;
        # -1 signals "no CV analyzed yet" to the template.
        redis_conn = get_redis_connection('analyze_data')
        cached_cv = redis_conn.get('cv_%s' % username)
        if cached_cv is None:
            result_dict['cv'] = -1
        else:
            result_dict['cv'] = cached_cv.decode()
        # Persist the combined result on the user's UserInfo record.
        user = request.user
        userinfo, _ = UserInfo.objects.get_or_create(user=user)
        userinfo.result = f"{result_dict}"
        userinfo.save()
        return render(request, 'paper.html', result_dict)

# Imports for the profile-completion views below
from django.shortcuts import render, redirect
from apps.users.models import UserSurveyInfo
from django.contrib.auth.mixins import LoginRequiredMixin

# Login-required version of PerfectInfoView (shadows the earlier definition)
class PerfectInfoView(LoginRequiredMixin, View):
    """Show the profile-completion form, pre-filled with any saved info."""
    login_url = '/users/login/'

    def get(self, request):
        '''
        Provide the perfect-info page.
        :param request:
        :return: perfect-info page, pre-filled when the user has saved data
        '''
        # Narrowed from a bare except: only "no record yet" is expected;
        # any other error should surface instead of being swallowed.
        try:
            survey_info = UserSurveyInfo.objects.get(user=request.user)
        except UserSurveyInfo.DoesNotExist:
            survey_info = None

        return render(request, 'perfect_info.html', {'survey_info': survey_info})

# View handling the profile-completion form submission
class SubmitPerfectInfoView(LoginRequiredMixin, View):
    """Persist the profile-completion form for the logged-in user."""
    login_url = '/users/login/'

    def post(self, request):
        '''
        Handle the perfect-info form submission.
        :param request: POST request carrying the survey fields
        :return: redirect to the CareerHub page
        '''
        # update_or_create replaces the original get-then-save / create
        # branches: one statement, and no race between the existence check
        # and the write.
        UserSurveyInfo.objects.update_or_create(
            user=request.user,
            defaults={
                'place': request.POST.get('place'),
                'age_group': request.POST.get('age_group'),
                'want_city': request.POST.get('want_city'),
                'want_job': request.POST.get('want_job'),
                # Default 0 when the field is absent (matches original).
                'work_experience': request.POST.get('work_experience', 0),
                'real_name': request.POST.get('real_name'),
                'phone': request.POST.get('phone'),
            },
        )

        # On success, send the user on to the CareerHub page.
        return redirect('contents:careerhub')
