# coding=utf-8
# 基于TF-IDF 文本相似性实战 (TF-IDF based text-similarity search)
import math
import os
from collections import Counter
from functools import lru_cache

import jieba
import jieba.analyse
import numpy as np
import pandas as pd
from django.shortcuts import render, HttpResponse
from fuzzywuzzy import fuzz, process
from scipy.linalg import norm
from sklearn.feature_extraction.text import CountVectorizer
from tqdm import tqdm

from app.models import Case, Case2
@lru_cache(maxsize=1)
def _load_stopwords(path=r'D:\pythonCode\MedicalProject\app\data\stopwords.txt'):
    """Load the stopword file once and return its entries as a set.

    The original implementation re-read and re-parsed this file on every
    call, never closed the file handle, and did an O(n) list scan per
    token; caching a set fixes all three. The raw-string path spells the
    same Windows path the original built via invalid escape sequences.
    """
    with open(path, encoding='UTF-8') as f:
        return {line.strip() for line in f}


def pretreatment(text):
    """Preprocess raw text for similarity matching.

    1. Segment the stripped text with jieba.
    2. Drop stopwords and tab tokens.

    Returns the kept tokens, each followed by a single space — the same
    trailing-space format the original produced, which the downstream
    whitespace-splitting tokenizer consumes.
    """
    words = jieba.cut(text.strip())
    stopwords = _load_stopwords()
    # ''.join avoids the quadratic `sentence += word` of the original.
    return ''.join(word + ' ' for word in words
                   if word not in stopwords and word != '\t')

# Create your views here.
def do_search(request):
    """Search view over the ``Case`` model.

    POST: preprocesses the submitted symptom description, scores every
    case by TF cosine similarity against its symptom text, and renders
    the matches ranked best-first. GET: renders the search form.
    """
    if request.method == "POST":
        # 1. Symptom description submitted from the search page.
        description = request.POST.get("description")
        # 2. Segment + de-stopword the query text.
        sentence = pretreatment(description)
        # 3. Fetch all cases once, and index them by name so the ranked
        #    result list can be rebuilt with O(1) lookups instead of the
        #    original O(n*m) nested rescan of the queryset.
        #    NOTE(review): assumes Case.name is unique — duplicate names
        #    collapse to a single entry; confirm against the model.
        data_list = Case.objects.all()
        cases_by_name = {case.name: case for case in data_list}
        # 4/5. Score each case; keep only non-zero similarities.
        scores = {}
        for case in data_list:
            symptoms = pretreatment(case.symptom)
            # value = fuzz.token_sort_ratio(sentence, symptoms)  # fuzzy-match alternative
            value = tf_similarity(sentence, symptoms)
            if value != 0:
                scores[case.name] = value
        # 6. Rank by similarity, best first.
        ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        for name, score in ranked:
            print(name, score)
        # 7. Rebuild the record list in ranked order.
        Data = [cases_by_name[name] for name, _ in ranked]
        # 8. Hand the ordered records to the list template.
        return render(request, 'get_list.html', {"data_list": Data})
    return render(request, 'do_search.html')


def do_search_(request):
    """Search view over the ``Case2`` model (thresholded variant).

    POST: preprocesses the submitted symptom description, scores every
    case by TF cosine similarity against its detailed-symptom text, keeps
    only matches at or above the 0.1 threshold, and renders them ranked
    best-first. GET: renders the search form.
    """
    if request.method == "POST":
        # 1. Symptom description submitted from the search page.
        description = request.POST.get("description")
        # 2. Segment + de-stopword the query text.
        sentence = pretreatment(description)
        # 3. Fetch all cases once, and index them by name so the ranked
        #    result list can be rebuilt with O(1) lookups instead of the
        #    original O(n*m) nested rescan of the queryset.
        #    NOTE(review): assumes Case2.case_name is unique — duplicates
        #    collapse to a single entry; confirm against the model.
        data_list = Case2.objects.all()
        cases_by_name = {case.case_name: case for case in data_list}
        # 4/5. Score each case against the query.
        scores = {}
        for case in data_list:
            symptoms = pretreatment(case.case_detail_symptoms)
            # value = fuzz.token_sort_ratio(sentence, symptoms)  # fuzzy-match alternative
            value = tf_similarity(sentence, symptoms)
            # Similarity threshold is 0.1 (the original comment wrongly
            # said 0.5); `value >= 0.1` already implies `value != 0`.
            if value >= 0.1:
                scores[case.case_name] = value
        # 6. Rank by similarity, best first.
        ranked = sorted(scores.items(), key=lambda item: item[1], reverse=True)
        for name, score in ranked:
            print(name, score)
        # 7. Rebuild the record list in ranked order.
        Data = [cases_by_name[name] for name, _ in ranked]
        # 8. Hand the ordered records to the list template.
        return render(request, 'get_list_.html', {"data_list": Data})
    return render(request, 'do_search_.html')




def get_details(request, uname):
    """Render the detail page for the ``Case`` named ``uname``.

    ``first()`` yields None when no case matches, so the template must
    tolerate a missing record. The redundant ``.all()`` before
    ``.filter()`` was dropped — it added a queryset clone for nothing.
    """
    data_details = Case.objects.filter(name=uname).first()
    return render(request, 'get_details.html', {"data_details": data_details})


def get_details_(request, uname):
    """Render the detail page for the ``Case2`` named ``uname``.

    ``first()`` yields None when no case matches, so the template must
    tolerate a missing record. The redundant ``.all()`` before
    ``.filter()`` was dropped — it added a queryset clone for nothing.
    """
    data_details = Case2.objects.filter(case_name=uname).first()
    return render(request, 'get_details_.html', {"data_details": data_details})

# TF-based text-similarity matching (cosine similarity of term counts).
def tf_similarity(s1, s2):
    """Cosine similarity between the term-frequency vectors of two strings.

    Both inputs are expected to be whitespace-separated token strings
    (the output format of ``pretreatment``).

    Fixes over the CountVectorizer version:
      * returns 0.0 instead of NaN when either string has no tokens — a
        real case after stopword removal, and NaN would pass the callers'
        ``value != 0`` checks and corrupt the ranking;
      * no per-call vectorizer construction / corpus fitting; plain
        ``Counter`` arithmetic produces the identical cosine value.

    Returns a float in [0.0, 1.0].
    """
    counts1 = Counter(s1.split())
    counts2 = Counter(s2.split())
    norm1 = math.sqrt(sum(c * c for c in counts1.values()))
    norm2 = math.sqrt(sum(c * c for c in counts2.values()))
    if norm1 == 0.0 or norm2 == 0.0:
        # At least one side is empty after preprocessing: define the
        # similarity as 0 rather than dividing by zero.
        return 0.0
    # Dot product over the shared vocabulary only.
    dot = sum(counts1[tok] * counts2[tok] for tok in counts1.keys() & counts2.keys())
    return dot / (norm1 * norm2)
'''
s1="今天的天气很好啊"
s2="今天的天气真不错啊"
s3="我在写作业"
s1=" ".join(s1)
s2=" ".join(s2)
s3=" ".join(s3)
print(tf_similarity(s1, s2))
print(tf_similarity(s1, s3))

'''
