from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.models import User
from django.contrib import messages
from django.http import JsonResponse
from django.db.models import Count
from django.conf import settings

import requests
from http import HTTPStatus
from urllib.parse import urlparse, unquote
from pathlib import PurePosixPath
from dashscope import ImageSynthesis
import dashscope
import os
import copy
import random

from hanlp_restful import HanLPClient
from .forms import RegisterForm
from .models import (
    UserProfile,
    Word,
    TermDatabase,
    Subject,
    UserCollection,
    CharacterFrequency,
    GeneralFrequency,
    LEVEL_CHOICES  # NOTE: the original code may have misspelled this name; LEVEL_CHOICES is the one used
)

HanLP = HanLPClient('https://www.hanlp.com/hanlp/v21/redirect', auth='67e27245eaf6d8f49fa25995', language='zh')
change_yu= {
    'Arg0': '施事',
    'Arg1': '受事',
    'Arg2': '范围',
    'Arg3': '动作开始',
    'Arg4': '动作结束',
    'AIg5': '其他动词相关',
    'ArgM-ADV': '状语',
    'AgM-BNF': '受益人',
    'AIgM-CND': '条件',
    'ArgM-DIR': '方向',
    'AIgM-DIS': '标记语',
    'AIgM-DGR': '程度',
    'ArgM-EXT': '范围',
    'ArgM-FRQ': '频率',
    'ArgM-LOC': '地点',
    'ArgM-MNR': '方式',
    'ArgM-PRP': '目的',
    'AIgM-TMP': '时间',
    'AIgM-TPC': '主题',
    'pred': '谓语',
}
change_ci = {
    'AD': '副词',
    'AS': '动态助词',
    'BA': '结构助词把/将',
    'CC': '并列连词',
    'CD': '数词或概数词',
    'CS': '关系连词',
    'DEC': '补语成分“的”',
    'DEG': '属格“的”',
    'DER': '助词“得”',
    'DEV': '助词“地”',
    'DT': '限定词',
    'ETC': '表示省略',
    'EM': '表情符',
    'FW': '外来语',
    'IC': '不完整成分',
    'IJ': '句首感叹词',
    'JJ': '其他名词修饰语',
    'LB': '被动标记',
    'LC': '方位词',
    
    'MSP': '其他助词',
    'NN': '普通名词',
    'NOI': '噪声',
    'NR': '专有名词',
    'NT': '时间名词',
    'OD': '序数词',
    'ON': '拟声词',
    
    'PN': '代词',
    'PU': '标点符号',
    'SB': '被动标记',
    'SP': '句末语气词',
    'URL': '网址',
    'VA': '性质形容词',
    'VC': '系动词',
    'VE': '动词有/无',
    'VV': '其他动词',
    'M': '量词',
    'P': '介词',
}

def replace_keys_with_values(input_string, mapping_dict):
    for key, value in mapping_dict.items():
        input_string = input_string.replace(key, value)
    return input_string

def register_view(request):
    subjects = Subject.objects.all()
    if request.method == 'POST':
        form = RegisterForm(request.POST)
        if form.is_valid():
            user = form.save(commit=False)
            user.set_password(form.cleaned_data['password'])
            user.save()

            chinese_level = form.cleaned_data.get('chinese_level')
            subject_code = form.cleaned_data.get('subject_code')
            subject_name = form.cleaned_data.get('subject_name')

            # 处理学科信息
            if subject_code:
                try:
                    subject = Subject.objects.get(code=subject_code)
                except Subject.DoesNotExist:
                    if subject_name:
                        try:
                            subject = Subject.objects.get(name=subject_name)
                        except Subject.DoesNotExist:
                            subject = None
                    else:
                        subject = None
            elif subject_name:
                try:
                    subject = Subject.objects.get(name=subject_name)
                except Subject.DoesNotExist:
                    subject = None
            else:
                subject = None

            user_profile, created = UserProfile.objects.get_or_create(user=user)
            user_profile.chinese_level = chinese_level
            user_profile.subject = subject
            user_profile.save()

            messages.success(request, "注册成功。")
            return redirect('login')
    else:
        form = RegisterForm()
    return render(request, 'core/register.html', {'form': form, 'subjects': subjects})

def login_view(request):
    if request.method == 'POST':
        form = AuthenticationForm(request, data=request.POST)
        if form.is_valid():
            user = form.get_user()
            login(request, user)
            # 登录成功后重定向到 index 页面
            return redirect('index')
        else:
            messages.error(request, "Invalid credentials.")
    else:
        form = AuthenticationForm()
    return render(request, 'core/login.html', {'form': form})



def logout_view(request):
    logout(request)
    return redirect('index')


@login_required
def profile_view(request):
    collections = UserCollection.objects.filter(user=request.user)
    try:
        user_profile = UserProfile.objects.get(user=request.user)
        # 获取中文水平对应的中文描述
        chinese_level_dict = dict(LEVEL_CHOICES)
        chinese_level = chinese_level_dict.get(int(user_profile.chinese_level))
        subject = user_profile.subject
        if subject:
            subject_code = subject.code
            subject_name = subject.name
        else:
            subject_code = "未设置"
            subject_name = "未设置"
    except UserProfile.DoesNotExist:
        chinese_level = "未设置"
        subject_code = "未设置"
        subject_name = "未设置"

    return render(request, 'core/profile.html', {
        'collections': collections,
        'chinese_level': chinese_level,
        'subject_code': subject_code,
        'subject_name': subject_name
    })

@login_required
def collect_term(request, term_id):
    term = get_object_or_404(TermDatabase, id=term_id)
    UserCollection.objects.get_or_create(user=request.user, term=term)
   
    return redirect('term_detail', term_id=term_id)

@login_required
def collect_word(request, word_id):
    word = get_object_or_404(Word, id=word_id)
    UserCollection.objects.get_or_create(user=request.user, word=word)
    return redirect('word_detail', word_id=word_id)




def visualize_text(request):
    result = None
    if request.GET.get("text"):
        text_input = request.GET.get("text")
        # 示例API请求
        prompt = text_input+"严格符合原句语义，科学严谨。"

        print('----sync call, please wait a moment----')
        rsp = ImageSynthesis.call(api_key=os.getenv("DASHSCOPE_API_KEY"),
                                  model="wanx2.1-t2i-turbo",
                                  prompt=prompt,
                                  n=1,
                                  size='1024*1024')
        print('response: %s' % rsp)
        if rsp.status_code == HTTPStatus.OK:



            # result = rsp.output.results
            # print(result.url)
            # result=requests.get(result.url).content
            # 在当前目录下保存图片
            for result in rsp.output.results:
                print(result.url)
                return render(request, 'visual_result.html', {'result': result})
                # file_name = PurePosixPath(unquote(urlparse(result.url).path)).parts[-1]
                # with open('./%s' % file_name, 'wb+') as f:
                #     f.write(requests.get(result.url).content)

        else:
            print('sync_call Failed, status_code: %s, code: %s, message: %s' %
                  (rsp.status_code, rsp.code, rsp.message))





def term_database_view(request):
    terms = TermDatabase.objects.all()
    return render(request, 'core/term_database.html', {'terms': terms})


from django.shortcuts import render
from .models import Word, TermDatabase

def search(request):
    query = request.GET.get('query')
    words = []
    terms = []

    if query:
        words = Word.objects.filter(word__icontains=query)
        terms = TermDatabase.objects.filter(term_name__icontains=query)
    
    # 修正返回数据格式
    return render(request, 'search_results.html', {'words': [words], 'terms': [terms], 'query': query})



from django.shortcuts import get_object_or_404, render

# 假设这是生成例句的函数
def generate_examples(content):


    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': '请给'+content+'造三个个例句，不要对这个词加以评价'}
        ]
    response = dashscope.Generation.call(
        # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key="sk-xxx",
        api_key=os.getenv('DASHSCOPE_API_KEY'),
        model="qwen-plus", # 此处以qwen-plus为例，可按需更换模型名称。模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
        messages=messages,
        result_format='message'
        )
    print(response["output"]["choices"][0]["message"]["content"])
    return response["output"]["choices"][0]["message"]["content"].split('\n')

def term_detail(request, term_id):
    term = get_object_or_404(TermDatabase, id=term_id)
    examples = generate_examples(term.term_name)
    if(term.semantic_transparency!=None):

        if(term.semantic_transparency==1):
            semantic_transparency=1
        else:
            semantic_transparency=(term.semantic_transparency-1)*25
    else:
        semantic_transparency=0

    
    print(semantic_transparency)
    return render(request, 'term_detail.html', {'term': term, 'examples': examples,'semantic_transparency':semantic_transparency})

def word_detail(request, word_id):
    word = get_object_or_404(Word, id=word_id)
    examples = generate_examples(word.word)
    return render(request, 'word_detail.html', {'word': word, 'examples': examples})




def subject_search(request):
    
    query = request.GET.get('query', '')
    subjects = Subject.objects.filter(code__icontains=query) | Subject.objects.filter(name__icontains=query)
    data = [{'code': subject.code, 'name': subject.name} for subject in subjects]
    return JsonResponse(data, safe=False)


def visualize_view(request):
    return render(request, 'visualize.html')

def nlp_analysis_view(request):
    return render(request, 'nlp_analysis.html')




def get_user_similarity_term(user1, user2):
    """
    计算两个用户的相似度，通过比较他们收藏的术语数量
    """
    user1_collections = set(UserCollection.objects.filter(user=user1).values_list('term', flat=True))
    user2_collections = set(UserCollection.objects.filter(user=user2).values_list('term', flat=True))
    intersection = len(user1_collections.intersection(user2_collections))
    union = len(user1_collections.union(user2_collections))
    return intersection / union if union != 0 else 0

def get_user_similarity_word(user1, user2):
    """
    计算两个用户的相似度，通过比较他们收藏的单词数量
    """
    user1_collections = set(UserCollection.objects.filter(user=user1, word__isnull=False).values_list('word', flat=True))
    user2_collections = set(UserCollection.objects.filter(user=user2, word__isnull=False).values_list('word', flat=True))
    intersection = len(user1_collections.intersection(user2_collections))
    union = len(user1_collections.union(user2_collections))
    return intersection / union if union != 0 else 0

def recommend_words(user):
    try:
        user_profile = UserProfile.objects.get(user=user)
        user_level = user_profile.chinese_level
        user_subject = user_profile.subject
    except UserProfile.DoesNotExist:
        user_level = None
        user_subject = None

    user_collections = UserCollection.objects.filter(user=user, word__isnull=False)
    if user_collections.exists():
        # 老用户，使用协同过滤算法
        all_users = UserCollection.objects.filter(word__isnull=False).values('user').annotate(count=Count('user')).order_by('-count')
        similar_users = []
        for other_user in all_users:
            other_user_obj = User.objects.get(id=other_user['user'])
            if other_user_obj == user:
                continue  # 跳过当前用户
            similarity = get_user_similarity_word(user, other_user_obj)
            if similarity > 0:
                similar_users.append((other_user_obj, similarity))
        similar_users.sort(key=lambda x: x[1], reverse=True)

        recommended_words = []
        for similar_user, _ in similar_users[:5]:
            similar_user_collections = UserCollection.objects.filter(user=similar_user, word__isnull=False)
            for collection in similar_user_collections:
                if collection.word and collection.word not in [col.word for col in user_collections]:
                    recommended_words.append(collection.word)
        recommended_words = list(set(recommended_words))  # 去重
        print(recommended_words)
        if user_level:
            recommended_words = [word for word in recommended_words if word.level == int(user_level)]
        if user_subject:
            # 假设 Word 模型有一个关联学科的字段，这里暂时用注释表示，实际使用时需调整
            # recommended_words = [word for word in recommended_words if word.subject == user_subject]
            pass
            
        recommended_words = recommended_words[:4]
    else:
        # 新用户，随机推荐四个单词
        if user_level:
            words = Word.objects.filter(level=int(user_level))
        else:
            words = Word.objects.all()
        recommended_words = random.sample(list(words), min(4, len(words)))
    return recommended_words

def recommend_terms(user):
    """
    为用户推荐术语，根据用户是否为新用户采用不同的推荐策略
    """
    
    try:
        user_profile = UserProfile.objects.get(user=user)
        user_subject = user_profile.subject if hasattr(user_profile, 'subject') else None
    except UserProfile.DoesNotExist:
        user_subject = None

    # 检查用户是否有收藏记录
    user_collections = UserCollection.objects.filter(user=user)
    if user_collections.exists():
        print('1')
        # 老用户，使用协同过滤算法
        all_users = UserCollection.objects.values('user').annotate(count=Count('user')).order_by('-count')
        similar_users = []
        for other_user in all_users:
            other_user_obj = User.objects.get(id=other_user['user'])
            if other_user_obj == user:
                continue  # 跳过当前用户
            similarity = get_user_similarity_term(user, other_user_obj)
            if similarity > 0:
                similar_users.append((other_user_obj, similarity))
        similar_users.sort(key=lambda x: x[1], reverse=True)

        recommended_terms = []
        for similar_user, _ in similar_users[:5]:
            similar_user_collections = UserCollection.objects.filter(user=similar_user)
            for collection in similar_user_collections:
                if collection.term and collection.term not in [col.term for col in user_collections]:
                    recommended_terms.append(collection.term)
        recommended_terms = list(set(recommended_terms))  # 去重
       

    else:
        print('2')
        # 新用户，随机推荐自己学科领域的术语
        if user_subject:
            print('3')
            terms = TermDatabase.objects.filter(term_field=user_subject.name)
            print(user_subject.name)
        else:
            print('4')
            terms = TermDatabase.objects.all()
        recommended_terms = random.sample(list(terms), min(100, len(terms)))
    
    # print(recommended_terms)
    # 筛选出 is_out_of_scope 为 1 且 is_truly_out_of_scope 为 1 的术语
    truly_out_of_scope_terms = [term for term in recommended_terms if term.is_out_of_scope == 1 and term.is_truly_out_of_scope == 1]
    truly_out_of_scope_recommendations = []
    for term in truly_out_of_scope_terms:
        total_frequency = 0
        for char in term.term_name:
            try:
                if term.is_general == 1:
                    char_freq = CharacterFrequency.objects.get(character=char)
                    total_frequency += char_freq.frequency
                else:
                    gen_freq = GeneralFrequency.objects.get(character=char)
                    total_frequency += gen_freq.frequency
            except (CharacterFrequency.DoesNotExist, GeneralFrequency.DoesNotExist):
                pass
        truly_out_of_scope_recommendations.append((term, total_frequency))
    truly_out_of_scope_recommendations.sort(key=lambda x: x[1], reverse=True)
    truly_out_of_scope_recommendations = [term[0] for term in truly_out_of_scope_recommendations[:2]]

    # 筛选出 is_out_of_scope 为 1 且 is_truly_out_of_scope 为 0 的术语
    false_out_of_scope_terms = [term for term in recommended_terms if term.is_out_of_scope == 1 and term.is_truly_out_of_scope == 0]
    false_out_of_scope_recommendations = []
    for term in false_out_of_scope_terms:
        semantic_transparency = int(term.semantic_transparency) if term.semantic_transparency else 0
        false_out_of_scope_recommendations.append((term, semantic_transparency))
    false_out_of_scope_recommendations.sort(key=lambda x: x[1], reverse=True)
    false_out_of_scope_recommendations = [term[0] for term in false_out_of_scope_recommendations[:4]]

    return truly_out_of_scope_recommendations, false_out_of_scope_recommendations

@login_required
def index(request):
    daily_word = None
    truly_out_of_scope_recommendations = []
    false_out_of_scope_recommendations = []
    recommended_words = []

    if request.user.is_authenticated:
        words = Word.objects.all()
        if words:
            daily_word = random.choice(words)
        truly_out_of_scope_recommendations, false_out_of_scope_recommendations = recommend_terms(request.user)
        recommended_words = recommend_words(request.user)
    print(daily_word,recommend_words,truly_out_of_scope_recommendations,false_out_of_scope_recommendations)
    return render(request, 'index.html', {
        'daily_word': daily_word,
        'truly_out_of_scope_recommendations': truly_out_of_scope_recommendations,
        'false_out_of_scope_recommendations': false_out_of_scope_recommendations,
        'recommended_words': recommended_words
    })

    
def nlp_processing(request):
    analysis_type = request.GET.get("analysis_type", "all")
    text_input = request.GET.get("text")
    result = []

    if text_input:
        if analysis_type in ["all", "tokenize", "tokenize_search"]:
            print("分词：")
            tokenize_result = HanLP.tokenize(text_input)
            print(tokenize_result)
            result.append(tokenize_result[0])

            if analysis_type == "tokenize_search":
                # 对每个分词进行搜索
                all_words = []
                all_terms = []

                for token in tokenize_result[0]:
                    query=token
                    words = []
                    terms = []

                    if query:
                        words = Word.objects.filter(word=query)
                        terms = TermDatabase.objects.filter(term_name=query)
                    all_words.append(words)
                    all_terms.append(terms)
                    
                
                return render(request, 'search_results.html', {'words': all_words, 'terms': all_terms, 'query': text_input})
        else:
            result.append([])

        if analysis_type in ["all", "pos"]:
            print("词性标注：")
            pos_result = HanLP.parse(text_input, tasks='pos/ctb')
            # 处理词性标注结果，转换为表格数据
            pos_table_data = []
            
            for word, pos in zip(pos_result['tok/fine'], pos_result['pos/ctb']):
                new_pos=[]
                
                
                for p in range( len(pos)):
                    new_pos.append(change_ci[pos[p]])
                    pos_table_data.append({'word': word[p], 'pos': new_pos[p]})
                    
                # 根据 change 字典替换 pos 值
               
            # pos_table_data.append({'word': word, 'pos': new_pos,'num':num})
            result.append(pos_table_data)
        else:
            if analysis_type != "all":
                result.append([])

        if analysis_type in ["all", "srl"]:
            print("语义角色标注：")
            srl_doc = HanLP.parse(text_input, tasks=['srl'])
            srl_doc.pretty_print()
            srl_str = '\n\n'.join(srl_doc.to_pretty())
            change_yu_upper = {key.upper(): value for key, value in change_yu.items()}
            for key, value in change_yu_upper.items():
                srl_str = srl_str.replace(key, value)
            srl_str = srl_str.replace('\t', '      ')
            print(srl_str)
            result.append(srl_str)
        else:
            if analysis_type != "all":
                result.append([])

        if analysis_type in ["all", "ner"]:
            print("专名识别:")
            ner_result = HanLP.parse(text_input, tasks='ner/msra')
            result.append(ner_result['ner/msra'])
        else:
            if analysis_type != "all":
                result.append([])

        if analysis_type in ["all", "coref"]:
            print("指代消解:")
            coref_data = HanLP.coreference_resolution(text_input)
            zd = []
            clusters = coref_data['clusters']
            tokens = coref_data['tokens']

            for cluster in clusters:
                sentence = ''.join(tokens)
                sentence_list = copy.deepcopy(tokens)
                for index_pair in cluster:
                    start_index = index_pair[1]
                    end_index = index_pair[2] - 1
                    sentence_list[start_index] = f"({sentence_list[start_index]}"
                    sentence_list[end_index] = f"{sentence_list[end_index]})"
                formatted_sentence = ''.join(sentence_list)
                zd.append(formatted_sentence)
            result.append(zd)
        else:
            if analysis_type != "all":
                result.append([])

        if analysis_type in ["all", "con"]:
            print("成分句法分析:")
            con_doc = HanLP.parse(text_input, tasks=['pos', 'con'])
            con_str = '\n\n'.join(con_doc.to_pretty())
            for key, value in change_ci.items():
                con_str = con_str.replace(key, value)
            result.append(con_str)
        else:
            if analysis_type != "all":
                result.append([])

    print(result)
    return render(request, 'nlp_result.html', {'result': result, 'analysis_type': analysis_type})





def paper_polish(request):
    result = None
    if request.method == 'POST':
        text = request.POST.get('text')
        if text:
            dashscope.api_key = settings.DASHSCOPE_API_KEY  # 从配置文件获取 API Key
            messages = [
                {'role': 'system', 'content': '你是一位专业的学术编辑，擅长对学术论文进行润色。请对输入的文本进行润色，提升语言的专业性和准确性。并给出润色理由。'},
                {'role': 'user', 'content': text}
            ]
            try:
                response = dashscope.Generation.call(
                    model="qwen-plus",
                    messages=messages,
                    result_format='message'
                )
                if response.status_code == 200:
                    result = response.output.choices[0].message.content
            except Exception as e:
                result = f"润色失败: {str(e)}"
    return render(request, 'paper_polish.html', {'result': result})

