# Reference: http://ydyl.drcnet.com.cn
from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse
import json
import requests
import time
from bs4 import BeautifulSoup
import time
import re
import json
import urllib

# Template for one search-result record (all fields at their defaults)
def resultData():
    """Return a fresh result record with every field set to its default.

    Each call builds new empty-list values for "keyword" and "author",
    so callers may mutate a record without affecting later ones.
    """
    return dict(
        websiteType=0,
        titleName="",
        titleHref="",
        source="",
        label="",
        volume="",
        summary="",
        detailId="",
        publishTime="",
        category="",
        quote="",
        abstract="",
        doi="",
        downLoad="",
        keyword=[],
        author=[],
    )

def formData(response):
    """Parse the search-result HTML fragment into a list of result records.

    Parameters:
        response: HTML string (the "html" payload from the search endpoint).

    Returns:
        A list of dicts in the resultData() shape, one per ".list" item.
        Fields that cannot be extracted are left at their defaults.
    """
    allData = []
    soup = BeautifulSoup(response, "html5lib")

    for li in soup.select(".list"):
        tempData = resultData()

        # Extraction is best-effort: an element missing from a list item
        # simply leaves the field at its default. Only IndexError (empty
        # select() result) is expected here — a bare `except` would also
        # hide real bugs and swallow KeyboardInterrupt/SystemExit.
        try:
            tempData["titleName"] = li.select("h3 > a")[0].text
        except IndexError:
            pass
        try:
            # Last <div> in the item carries the publish date.
            # NOTE(review): based on observed page layout — confirm.
            tempData["publishTime"] = li.select("div")[-1].text
        except IndexError:
            pass
        try:
            # Second-to-last <div> carries the abstract text.
            # NOTE(review): based on observed page layout — confirm.
            tempData["abstract"] = li.select("div")[-2].text
        except IndexError:
            pass

        allData.append(tempData)

    return allData

def getData(keyword):
    """Query the tk.cepiec.com.cn full-text search endpoint for *keyword*.

    Parameters:
        keyword: search term placed in the 'qryfind' form field.

    Returns:
        A list of result dicts produced by formData().

    Raises:
        requests.RequestException: on network failure, timeout, or a
            non-2xx HTTP response.
    """
    headers = {
        'Origin': 'http://tk.cepiec.com.cn',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
        'Proxy-Authorization': 'Basic cWY6cWYzLjE0MTU=',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Referer': 'http://tk.cepiec.com.cn/SP/tm/index1.php',
        'X-Requested-With': 'XMLHttpRequest',
        'Proxy-Connection': 'keep-alive',
    }

    # Form payload captured from the site's own search request; only
    # 'qryfind' (the search term) varies between calls.
    data = {
        'fullsearch': '1',
        'pagerecs': '20',
        'pageto': '1',
        'acttype': 'query',
        'qrynewstype': '',
        'qrydate': '',
        'qryymd': '',
        'qryverno': '',
        'qrytype': '',
        'id': '',
        'n': '',
        'qrygroup': '1',
        'qrysubject': '',
        'qrygrptype': '',
        'qryvolno': '',
        'qrycolumn': '',
        'qrydate1': '',
        'qrydate2': '',
        'qryfind': keyword,
        'qryall': '1',
        'qryop1': 'AND',
        'qryfind1': '',
        'qryfield1': '',
        'qryordf1': 'newstype',
        'qryordby1': 'ASC',
        'qryop2': 'AND',
        'qryfind2': '',
        'qryfield2': '',
        'qryordf2': 'date',
        'qryordby2': 'ASC',
        'qrysame': '1',
        'qryword': '',
        'qryordf3': 'verno',
        'qryordby3': 'ASC',
        'qryordf4': 'head',
        'qryordby4': 'ASC',
        'qryordf5': 'author',
        'qryordby5': 'ASC',
        'qrysave': '1',
        'qryout': ''
    }

    # timeout: without it a stalled server hangs this call forever.
    response = requests.post(
        'http://tk.cepiec.com.cn/SP/tm/tm_xunsearch.php',
        headers=headers,
        data=data,
        timeout=30,
    )
    # Fail loudly on HTTP errors instead of crashing later on a
    # malformed/absent JSON body.
    response.raise_for_status()

    # The endpoint wraps the rendered result list in a JSON envelope
    # under the "html" key.
    html = response.json()["html"]

    return formData(html)


def index():
    """Run a demo search with a canned query and print the results.

    Only the first entry of "values" is actually used; "type" and "log"
    are carried along but ignored by getData().
    """
    query = {"values": ["劳动", "张"], "type": ["TI", "AU"], "log": ["and"]}
    results = getData(query["values"][0])
    print(results)



# Run the demo only when executed as a script. An unguarded call here
# fires a live network request every time the module is imported (e.g.
# by Django URL routing for this views file).
if __name__ == "__main__":
    index()