import requests
from bs4 import BeautifulSoup
import re

# Template for one search-result record (all fields empty by default).
def resultData():
    """Return a fresh, empty result record.

    Every call builds a new dict (and new list objects for the
    list-valued fields) so callers can mutate the record freely.
    """
    return {
        "websiteType": 0,
        "titleName": "",
        "titleHref": "",
        "source": "",
        "label": "",
        "volume": "",
        "summary": "",
        "detailId": "",
        "publishTime": "",
        "category": "",
        "quote": "",
        "abstract": "",
        "doi": "",
        "downLoad": "",
        "keyword": [],
        "author": [],
    }

def formData(response):
    """Parse the search-result payload into a list of result records.

    Parameters
    ----------
    response : dict
        Decoded JSON from the search endpoint; must contain an "html"
        key holding the result-page markup.

    Returns
    -------
    list[dict]
        One resultData() record per ".main-box .list" row.  Fields that
        cannot be extracted keep their empty defaults.
    """
    allData = []
    soup = BeautifulSoup(response['html'], "html5lib")

    lis = soup.select(".main-box .list")

    for li in lis:
        tempData = resultData()
        # A row may lack these cells; only a missing element (IndexError)
        # is expected, so don't swallow unrelated errors with a bare except.
        try:
            tempData["titleName"] = li.select(".showbox")[0].text.strip()
        except IndexError:
            pass
        try:
            tempData["category"] = li.select("td")[2].text.strip()
        except IndexError:
            pass
        allData.append(tempData)

    return allData

def getData(keyword):
    """Query the tk.cepiec.com.cn full-text search and return parsed results.

    Parameters
    ----------
    keyword : str
        Search term posted as the ``qryfind`` form field.

    Returns
    -------
    list[dict]
        Parsed result records from formData().

    Raises
    ------
    requests.RequestException
        On network failure.
    ValueError
        If the endpoint does not return valid JSON.

    NOTE(review): the original code had a "需要cookie" (needs cookie)
    comment — the endpoint may require a session cookie to return real
    results; verify and attach one via requests.Session if needed.
    """
    headers = {
        'Pragma': 'no-cache',
        'Origin': 'http://tk.cepiec.com.cn',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36',
        'Proxy-Authorization': 'Basic cWY6cWYzLjE0MTU=',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
        'Cache-Control': 'no-cache',
        'X-Requested-With': 'XMLHttpRequest',
        'Proxy-Connection': 'keep-alive',
        'Referer': 'http://tk.cepiec.com.cn/SP/tm/index1.php',
    }

    # Form fields mimic the site's own search form; only qryfind varies.
    data = [
        ('fullsearch', '1'),
        ('pagerecs', '20'),
        ('pageto', '1'),
        ('acttype', 'query'),
        ('qrynewstype', ''),
        ('qrydate', ''),
        ('qryymd', ''),
        ('qryverno', ''),
        ('qrytype', ''),
        ('id', ''),
        ('n', ''),
        ('qrygroup', '1'),
        ('qrysubject', ''),
        ('qrygrptype', ''),
        ('qryvolno', ''),
        ('qrycolumn', ''),
        ('qrydate1', ''),
        ('qrydate2', ''),
        ('qryfind', keyword),
        ('qryall', '1'),
        ('qryop1', 'AND'),
        ('qryfind1', ''),
        ('qryfield1', ''),
        ('qryordf1', 'newstype'),
        ('qryordby1', 'ASC'),
        ('qryop2', 'AND'),
        ('qryfind2', ''),
        ('qryfield2', ''),
        ('qryordf2', 'date'),
        ('qryordby2', 'ASC'),
        ('qrysame', '1'),
        ('qryword', ''),
        ('qryordf3', 'verno'),
        ('qryordby3', 'ASC'),
        ('qryordf4', 'head'),
        ('qryordby4', 'ASC'),
        ('qryordf5', 'author'),
        ('qryordby5', 'ASC'),
        ('qrysave', '1'),
        ('qryout', ''),
        ('searchrows', '1565'),
        ('qryop3', 'AND'),
        ('qryfind3', ''),
        ('qryresult', '1'),
        ('qrygroup1', 'main'),
        ('pagelink', ''),
        ('chcnt', '0'),
        ('setpagerecs', '20'),
    ]

    response = requests.post('http://tk.cepiec.com.cn/SP/tm/tm_xunsearch.php', headers=headers, data=data)
    # The original left debug prints and an unconditional exit() here, so the
    # function never returned; it also indexed response.text (a str) like a
    # dict, which would raise TypeError.  Parse the JSON body instead.
    allData = formData(response.json())

    return allData


def index():
    """Run a sample search and print the parsed results."""
    # Only values[0] is used as the search keyword; "TI" marks a title query.
    query = {"values": ["报刊"], "type": ["TI"], "log": [""]}

    results = getData(query["values"][0])

    print(results)
    # exit()




index()