from django.shortcuts import render

# Create your views here.
from django.http import HttpResponse
import requests
import time
from bs4 import BeautifulSoup
import re
import json
import urllib

# Template for a single scraped-article result record (shared schema for all entries).
def resultData():
    """Return a fresh result record with every field set to an empty default.

    Each scraped article is copied into one of these dicts, so all entries
    share the same schema regardless of which fields the page provided.
    A new dict (with new lists) is built on every call so records never
    share mutable state.
    """
    return {
        "websiteType": 0,
        "titleName": "",
        "titleHref": "",
        "source": "",
        "label": "",
        "volume": "",
        "summary": "",
        "detailId": "",
        "publishTime": "",
        "category": "",
        "quote": "",
        "abstract": "",
        "doi": "",
        "downLoad": "",
        "keyword": [],
        "author": [],
    }

def getSearchTerm(keyword, timeout=30):
    """Fetch the RSC results page for *keyword* and return the search token.

    The results page embeds an opaque token in a hidden ``#SearchTerm``
    input; that token is what the ``journalresult`` POST endpoint expects
    (see getData).

    Args:
        keyword: free-text query, sent as the ``searchtext`` query parameter.
        timeout: seconds before the HTTP request is aborted. Without a
            timeout, requests waits indefinitely and a stalled connection
            would hang the Django worker.

    Returns:
        str: the value of the hidden ``#SearchTerm`` input.

    Raises:
        IndexError: if the fetched page contains no ``#SearchTerm`` element.
        requests.RequestException: on network failure or timeout.
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    }

    params = {'searchtext': keyword}

    response = requests.get(
        'https://pubs.rsc.org/en/results',
        headers=headers,
        params=params,
        timeout=timeout,
    )

    soup = BeautifulSoup(response.text, "html5lib")

    return soup.select("#SearchTerm")[0]["value"]


def formData(response):
    """Parse an RSC search-results HTML response into result records.

    Args:
        response: a requests.Response whose ``.text`` holds the HTML of a
            pubs.rsc.org results page (one ``.capsule--article`` node per
            article).

    Returns:
        list[dict]: one resultData() record per article. Fields whose
        markup is missing keep their empty defaults.
    """
    soup = BeautifulSoup(response.text, "html5lib")

    allData = []

    for li in soup.select(".capsule--article"):
        tempData = resultData()

        # Field extraction is deliberately best-effort: a capsule missing a
        # node keeps the empty default instead of aborting the whole page.
        # Catch only the lookup errors that select()/[0]/["href"] can raise
        # — a bare except would also hide real bugs (typos, parser errors).
        try:
            tempData["titleName"] = li.select(".capsule__title")[0].text.strip()
        except (IndexError, KeyError):
            pass

        try:
            tempData["titleHref"] = li.select(".capsule__action")[0]["href"]
        except (IndexError, KeyError):
            pass

        try:
            tempData["author"] = [li.select(".article__authors")[0].text.strip()]
        except (IndexError, KeyError):
            pass

        try:
            tempData["abstract"] = li.select(".capsule__text")[0].text.strip()
        except (IndexError, KeyError):
            pass

        allData.append(tempData)

    return allData

def getData(searchterm, pageno='1', category='all', timeout=30):
    """POST the RSC ``journalresult`` endpoint and return parsed articles.

    Args:
        searchterm: opaque token obtained from getSearchTerm().
        pageno: 1-based results page to fetch, as a string because the
            endpoint takes form-encoded values. Defaults to the first page
            (preserves the previous hard-coded behavior).
        category: result category filter; 'all' matches the site default.
        timeout: seconds before the HTTP request is aborted, so a stalled
            connection cannot hang the Django worker.

    Returns:
        list[dict]: parsed article records (see formData).

    Raises:
        requests.RequestException: on network failure or timeout.
    """
    headers = {
        # 'X-NewRelic-ID': 'VQYFWF9aDBABV1laBgcFUw==',
        'Origin': 'https://pubs.rsc.org',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.96 Safari/537.36',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'text/html, */*; q=0.01',
        'Referer': 'https://pubs.rsc.org/en/results?searchtext=cell',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
    }

    data = {
        'searchterm': searchterm,
        # NOTE(review): resultcount was captured from one recorded session;
        # the endpoint appears not to depend on the exact value — confirm.
        'resultcount': '253795',
        'category': category,
        'pageno': pageno,
    }

    response = requests.post(
        'https://pubs.rsc.org/en/search/journalresult',
        headers=headers,
        data=data,
        timeout=timeout,
    )

    return formData(response)


def index(request):
    """Django view: run an RSC publication search for the POSTed keyword.

    Expects a POST form field ``data`` containing JSON whose
    ``values[0]`` entry is the search keyword. Responds with JSON:
    ``collection`` (list of article records) and ``allNum`` (always 0).
    """
    payload = json.loads(request.POST.get('data', '1'))

    # The token scraped from the results page, needed by the search POST.
    search_token = getSearchTerm(payload["values"][0])

    collection = getData(search_token)

    body = json.dumps({
        "collection": collection,
        "allNum": 0
    })
    return HttpResponse(body, content_type="application/json")