# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
from pymongo import MongoClient
from utils.HttpUtils import HttpUtils


requests.packages.urllib3.disable_warnings()  # 忽略HTTPS安全警告


"""
万方数据库
http://www.wanfangdata.com.cn/index.html
http://www.wanfangdata.com.cn/searchResult/getAdvancedSearch.do?searchType=#a_001
"""


class Contents():
    """Crawler for the Wanfang database (万方) advanced-search results.

    Builds a fixed set of request headers once, then fetches and parses
    one result page per call to :meth:`get_contents`.
    """

    def __init__(self):
        # CookieJar instance to hold session cookies.
        # requests.utils.dict_from_cookiejar(resp.cookies) converts cookies to a dict.
        self.cookie = cookiejar.CookieJar()
        # NOTE: removed an unused `UserAgent(use_cache_server=False)` instantiation;
        # it performed a network fetch on construction and its value was never used
        # (a static User-Agent is supplied in the headers below).
        self.headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Cookie': 'JSESSIONID=1B2032D1493A2239E13CFC6BEF2B606E; zh_choose=n; firstvisit_backurl=http%3A//www.wanfangdata.com.cn; Hm_lvt_838fbc4154ad87515435bf1e10023fab=1589675660; Hm_lpvt_838fbc4154ad87515435bf1e10023fab=1589678839; SEARCHHISTORY_0=UEsDBBQACAgIACZOsVAAAAAAAAAAAAAAAAABAAAAMO3aW2sbRxQA4P%2Bih5IUuZnLzs0Qii61rcQK%0AsWVLlkspq71J1mpX2l1ZlxJoKS2ltIS%2BlVIIFNq35LmQtn%2BmdpL%2Bix4nuH3ZWRCUegPzpGV2R9J%2B%0AnJ05c2Y%2F%2FKSS2YPQe2BPvMp2NA%2FDamXkVrYrvjNc%2B2mGKEKVamWeeknLvb4g9ezEGR6tptCFwMkk%0AhA533rQeeuk8zO4EXlZzz%2B3I8dzO6%2Fb33Pj9DHrcXfjT%2BSAcpcOtqZeM4o%2FtJBs5dlh1vSDxvK03%0AH%2F%2B0OnHke4kH37N1dXjd%2Fk6aJXbmBau7ty5%2Bff7XT99v37p8%2FN3Lz59e%2FvHFiyc%2F3779bhNObxOE%0A%2BRZB5OoWvOU08dJ0FEfwZ3M7wUXZ63uqvPjxyeVXX%2F%2F56WeXT3%2B5%2BP1bOLj47YdXz57BFdF8shPP%0AI7CgTFQrTuLBDx2NrvQwk4pLhCwCaI%2BqOtnljtd2Tp2YKQO7AaxQShHElB62Xefq7NjpCMWM7Gay%0AkgKaXpYGw921XcuUMDG7mSyxLEDTy7oDgmRvLKmFjexGspgiQNPLinXcUOkeVpQa2U1kpRAM0Apk%0A7bN4%2F35wXxAzzm4oizmg6WVH69li%2BUFSI1yUWlZn9J9DEsx4riRDCpD0ksF%2Bt907jwOsyp2%2BXiO9%0AfP7lq8ffaA3%2FPb1RGAoI8lw%2FzigDGr1ff5itvIP%2B0ir5M35TfpaFraInuX4%2BXvijpsswN355fhQC%0AkOv9Dvxmb9nYP4Xc1Pjl%2BSEBNHq%2FtFmfLvzIodKMf3l%2BVCGgKZiJ3XprmDQkJ%2BXOHm%2FMj2OgKVgx%0Axv26H9bPMC33uubG%2FCxYwBSsXgai1a%2B1XVsgYvzy%2FKgEGr3fbHfZ6hwfOBYx8ZfjxwQjQKP3mzda%0ArXF7hqXJn%2FP9LCKL8men55x0mykjzOR%2FuX5UAo3ezxoP0p1G2FDKjH%2F58ceBRu8Xh6cHQgwCLk3%2B%0Akjd%2FYMGBpiD%2FQzUiu42RoCb%2B8vMXAjR6v87hbGh3z04YNeNfrh%2BzgEbvx1ds3JtOV9Iyz2%2BunxRA%0Ao%2FdbPwz2BqlgmJj6Qf7694qmwK%2Fu98ZInEiTv%2BTXX4iSRfnL4Z5dj9ru4u2o4%2F%2F%2F1T%2BBUEH1%2FmTa%0APYyc%2FUAhy%2Fjl%2BSkONAWzb5qGyyTdU8qsfnN3P5QAGr1fb9a%2Fh1cLLPDbMXvc4D4cpoCkl4x6adr1%0AlrtKljsSS7hXzCWg6WXHs%2BOGm03GhJW7QlM6WYUJAzS97L0H7sMZ6TUoN7IbygoJaAW7orJ5zLu7%0Ae4SVe14vnyyxFKAV1MtivyXbfGIJI7vpu48S0AredmpO%2BWQZdxgvdy5QNlmJEBWAppc9OndP%2FclO%0AqkzMbiiLGQG0Rx%2F9DVBLBwh1AeYS8AMAAL4vAAA%3D%0A',
            'DNT': '1',
            'Host': 'www.wanfangdata.com.cn',
            'Origin': 'http://www.wanfangdata.com.cn',
            'Referer': 'http://www.wanfangdata.com.cn/searchResult/getAdvancedSearch.do?searchType=',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
        }

    def get_contents(self, cursor):
        """Fetch one page of search results and return the parsed rows.

        :param cursor: 1-based page number to request.
        :return: list of dicts, one per article, with title, authors,
                 source journal, summary, keywords and numbered
                 ``author_info_N`` ("name|affiliation") entries.
        """
        postData = {
            'paramStrs': '(主题:(发热待查))*Date:2016-2020',
            'updateDate': '',
            'classType': 'wfpublish-perio_artical,degree-degree_artical,conference-conf_artical',
            # BUG FIX: the page number was hard-coded to '1', so every call
            # fetched the first page regardless of `cursor`.
            'pageNum': str(cursor),
            'pageSize': '',
            'sortFiled': '',
            'isSearchSecond': 'false',
            'chineseEnglishExpand': 'false',
            'topicExpand': 'false',
            'searchWay': 'AdvancedSearch',
            'corePerio': 'false',
            'rangeParam': ''
        }
        url = "http://www.wanfangdata.com.cn/searchResult/getCoreSearch.do?d=0.7869742666014199"
        html = HttpUtils.do_request("POST", url, self.headers, postData)
        data_json = json.loads(html.text)
        results = []  # BUG FIX: rows were built and then discarded; collect and return them
        for d in data_json['pageRow']:
            dict_data = dict()
            dict_data['title'] = d['title'].replace("<em>", "").replace("</em>", "")  # title (strip highlight tags)
            dict_data['authors_name'] = "|".join(d['authors_name'])  # authors
            dict_data['perio_title'] = d['perio_title']  # source journal
            dict_data['summary'] = d['summary'].replace("<em>", "").replace("</em>", "")  # abstract
            dict_data['text_keywords'] = "|".join(d['text_keywords'])  # keywords
            # Author and affiliated institution, one numbered field per author.
            tag_num = 1
            for a in d['author_info']:
                dict_data['author_info_' + str(tag_num)] = a['author_name'] + "|" + a['author_unit']
                tag_num += 1
            results.append(dict_data)
        return results


if __name__ == '__main__':
    # Entry point: crawl the first 10 result pages sequentially.
    crawler = Contents()
    page = 1
    while page <= 10:
        print(f"============第【{page}】页=============")
        crawler.get_contents(page)
        page += 1
