from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse
from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse
import json
import requests
import time
from bs4 import BeautifulSoup
import time
import re
import json
from bs4 import BeautifulSoup
import urllib

#返回结果格式
def resultData():
    """Create one blank search-result record.

    Every scraped article starts from this template; formData() fills in
    whichever fields it can extract.  Key insertion order is preserved
    because the dict is later serialized with json.dumps.
    """
    record = {"websiteType": 0}
    # All plain-text fields default to the empty string.
    for field in (
        "titleName",
        "titleHref",
        "source",
        "label",
        "volume",
        "summary",
        "detailId",
        "publishTime",
        "category",
        "quote",
        "abstract",
        "doi",
        "downLoad",
    ):
        record[field] = ""
    # List-valued fields get fresh lists on every call (no shared state).
    record["keyword"] = []
    record["author"] = []
    return record

def getAuthors(ass):
    """Return the text of each author element as a list of strings.

    *ass* is an iterable of elements exposing a ``.text`` attribute
    (e.g. BeautifulSoup tags from ``dl.select(".author a")``).
    """
    # Comprehension instead of append-loop; also avoids the original's
    # local named `all`, which shadowed the builtin.
    return [a.text for a in ass]

def formData(response):
    """Parse a cqvip search-result page into a list of result records.

    Parameters:
        response: an HTTP response whose ``.text`` holds the result HTML.

    Returns:
        list[dict]: one resultData()-shaped dict per ``#remark > dl`` entry.
        Extraction of each field is best-effort: a field that cannot be
        found is simply left at its default.
    """
    allData = []
    soup = BeautifulSoup(response.text, "html5lib")

    # "2019年第3期"-style volume string; compiled once, outside the loop.
    vol_pattern = re.compile("(.*?)年第(.+?)期")

    for dl in soup.select("#remark > dl"):
        tempData = resultData()
        # Constant marker for this source site.
        tempData["websiteType"] = 2

        try:
            tempData["titleName"] = dl.select("dt")[0].select("a")[0].text
        except Exception:
            pass
        try:
            tempData["titleHref"] = "http://lib.cqvip.com" + dl.select("dt")[0].select("a")[0]["href"]
        except Exception:
            pass
        try:
            tempData["source"] = dl.select(".from")[0].select("a")[0].text
        except Exception:
            pass

        try:
            # Only the year (first capture group) is kept.
            dd = dl.select(".vol")[0].text
            da = vol_pattern.findall(dd)
            tempData["publishTime"] = da[0][0]
        except Exception:
            pass

        try:
            tempData["author"] = getAuthors(dl.select(".author a"))
        except Exception:
            # Previously `raise`, which aborted the whole page when one
            # entry lacked authors; now best-effort like every other field.
            pass

        try:
            tempData["abstract"] = dl.select(".abstract > span")[0].text
        except Exception:
            pass

        allData.append(tempData)

    return allData

def getData(keyword):
    """Search qikan.cqvip.com for *keyword* and return parsed results.

    Replays the POST the site's search form sends, then delegates HTML
    parsing to formData().  Returns a list of result dicts.

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    # Browser-like headers so the site serves the normal HTML page.
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Origin': 'http://qikan.cqvip.com',
        'Upgrade-Insecure-Requests': '1',
        'Content-Type': 'application/x-www-form-urlencoded',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Referer': 'http://qikan.cqvip.com/',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }

    # Query-string parameter the search endpoint expects.
    params = (
        ('from', 'index'),
    )

    # Form body mirroring the site's search form; 'U' appears to be the
    # site's search-index identifier — TODO confirm against the live form.
    data = {
        'key': 'U={}'.format(keyword),
        'isNoteHistory': '1',
        'isLog': '1',
        'indexKey': keyword,
        'indexIdentifier': 'U'
    }

    # timeout added so a stalled server cannot hang the worker forever.
    response = requests.post(
        'http://qikan.cqvip.com/Qikan/Search/Index',
        headers=headers,
        params=params,
        data=data,
        timeout=15,
    )

    return formData(response)


def index(request):
    """Django view: run a journal search for the posted keyword.

    Expects a POST field ``data`` holding JSON shaped like
    ``{"values": ["<keyword>", ...]}``; only the first value is used.

    Returns:
        HttpResponse: JSON ``{"collection": [...], "allNum": 0}``.
        A missing or malformed payload yields an empty collection
        instead of a 500 (previously the fallback ``'1'`` parsed to the
        int 1 and ``data["values"]`` raised TypeError).
    """
    allData = []
    try:
        data = json.loads(request.POST.get('data', '1'))
        keyword = data["values"][0]
    except (ValueError, TypeError, KeyError, IndexError):
        keyword = None

    if keyword is not None:
        allData = getData(keyword)

    return HttpResponse(json.dumps({
        "collection": allData,
        "allNum": 0
    }), content_type="application/json")