from django.shortcuts import render
from django.http import HttpResponse,JsonResponse
import csv
import jieba
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
import heapq
import pandas as pd
# Create your views here.
def sum(request):
    """Smoke-test view: respond with a fixed plain-text greeting.

    NOTE(review): the name shadows the builtin ``sum``; renaming would
    require touching the URLconf, so it is left as-is.
    """
    message = "第一个视图函数"
    return HttpResponse(message)

def catalogs(request):
    """Return the fixed list of top-level book categories as JSON.

    Response shape: {'data2': [<category name>, ...]}.
    """
    payload = {'data2': ['小说', '计算机', '建筑', '艺术']}
    return JsonResponse(payload)

def rights(request):
    """Return the book titles belonging to the category in ``?id=<name>``.

    Response shape: {'data': [<book title>, ...]}.
    A missing or unknown ``id`` yields an empty list instead of the
    original KeyError (which surfaced as an HTTP 500).
    """
    categories = {
        '小说': ["ms sqlserver 从删库到跑路", "计算机视觉"],
        '计算机': ["台式机", "笔记本"],
        '建筑': ["少林寺风格", "大明寺"],
        '艺术': ["自由女神像", "蒙娜丽莎"],
    }
    category_id = request.GET.get('id')
    # dict.get guards against None / unrecognised category ids.
    return JsonResponse({'data': categories.get(category_id, [])})

#爬取网站
import requests as re
def scrapy2(request):
    """Fetch Dangdang's current fiction sale list and return it as JSON.

    Response shape: {'data3': [{'title', 'salePrice', 'img'}, ...]}.

    NOTE(review): ``re`` here is the module-level ``import requests as re``
    alias (it shadows the stdlib regex module name). The ``title`` query
    parameter is read in the original code but never used in the API call.
    """
    url = "http://e.dangdang.com/media/api.go?action=mediaCategoryLeaf&promotionType=1&deviceSerialNo=html5&macAddr=html5&channelType=html5&permanentId=20200603191830898130253837307269756&returnType=json&channelId=70000&clientVersionNo=5.8.4&platformSource=DDDS-P&fromPlatform=106&deviceType=pconline&token=&start=0&end=20&category=XS2&dimension=dd_sale"
    # timeout prevents this view (and its Django worker) from hanging
    # forever when the remote API is slow or unreachable.
    res = re.get(url=url, timeout=10)
    rs = res.json()
    books = rs["data"]["saleList"]
    # Each saleList entry wraps the actual book record in mediaList[0].
    data = [
        {'title': bk["title"], 'salePrice': bk["salePrice"], "img": bk["coverPic"]}
        for bk in (book["mediaList"][0] for book in books)
    ]
    return JsonResponse({'data3': data})

def detail(request):
    """Return detail info for the book named by ``?title=...`` plus the
    books most similar to it, ranked by TF-IDF cosine similarity of
    their descriptions.

    Response shape:
        {'data3': [<detail dict>], 'data4': [{'title', 'img'}, ...]}
    'data4' holds the 4 highest-scoring rows; the queried book itself is
    normally among them (its self-similarity is maximal).

    Fixes vs. the original:
    - file opened read-only ('r') instead of 'r+' (it is never written);
    - an unknown title returns empty payloads instead of raising
      ValueError from ``.index`` (an HTTP 500);
    - top-4 indices are taken with ``heapq.nlargest`` over row indices —
      the original mapped ``list(b).index`` over the top *values*, which
      returns the first occurrence for tied scores and could repeat or
      miss rows.
    """
    title = request.GET.get('title')
    # data.csv columns: title, category, price, image, description, date.
    # NOTE(review): the relative path depends on the process CWD — confirm.
    with open('../../data.csv', 'r', encoding='utf-8') as f:
        rows = [row for row in csv.reader(f) if row]

    titles = [row[0] for row in rows]
    if title not in titles:
        # Graceful empty response for unknown/missing titles.
        return JsonResponse({'data3': [], 'data4': []})
    n = titles.index(title)  # first matching row, as in the original

    row = rows[n]
    data = [{'title': row[0], 'categorys': row[1], 'salePrice': row[2],
             "img": row[3], 'descs': row[4], 'creationDate': row[5]}]

    # Tokenize every description with jieba (full mode) so TfidfVectorizer
    # can split on whitespace.
    corpus = [" ".join(jieba.cut(r[4], cut_all=True)) for r in rows]
    tfidf = TfidfVectorizer().fit_transform(corpus)
    # Row n of the linear kernel = similarity of the queried book to all.
    scores = linear_kernel(tfidf, tfidf)[n]

    # Indices of the 4 largest scores, duplicate-safe under ties.
    top = heapq.nlargest(4, range(len(scores)), key=scores.__getitem__)

    data1 = [{'title': rows[i][0], 'img': rows[i][3]} for i in top]
    return JsonResponse({'data3': data, 'data4': data1})

