import csv

import requests
from pyquery import PyQuery as pq
import re
import time
import pymysql

# MySQL connection settings.
# NOTE(review): empty root password and a hard-coded host are fine for a
# one-off local scraper, but should come from config/env in shared code.
dbhost = 'localhost'
dbuser = 'root'
passwd = ''
dbname = 'douban'
# Keyword arguments are required: PyMySQL 1.0 removed the old positional
# (host, user, password, db) calling convention used here originally.
conn = pymysql.connect(host=dbhost, user=dbuser, password=passwd,
                       database=dbname, charset='utf8')
cur = conn.cursor()

# CNKI scholar-search endpoint; query parameters are appended by requests.
base_url = 'http://search.cnki.net/Search.aspx?'

# Browser-mimicking request headers.
# NOTE(review): the Cookie holds a captured session id (ASP.NET_SessionId)
# that will eventually expire — refresh it if requests start failing.
header = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7',
'Cache-Control':'max-age=0',
'Connection':'keep-alive',
'Cookie':'Ecp_ClientId=3180522193705248137; cnkiUserKey=b83dcf32-81b9-e7ec-77a5-13d6e9fe2210; UM_distinctid=163a5e6b0721b2-08bec8c28dc04e-39614807-100200-163a5e6b073159; Ecp_IpLoginFail=180821221.133.234.226; CNZZDATA2643871=cnzz_eid%3D462770398-1534811578-null%26ntime%3D1535011982; CNZZDATA3636877=cnzz_eid%3D797925765-1534811377-null%26ntime%3D1535011851; SID_search=201087; ASP.NET_SessionId=yi2i2355peneghvjqqwolozj',
'Host':'search.cnki.net',
'Referer':'http://search.cnki.net/search.aspx?q=author%3a%e7%ab%a0%e5%85%83&rank=relevant&cluster=zyk&val=CJFDTOTAL',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',

}

# Base query-string parameters for the search.
# 'q' is overwritten per author by get_content(); 'p' is the result offset
# (results are paged 15 per page in the commented-out detail scraper below).
data = {
'q':'author:盛文军',
'rank':'relevant',
'cluster':'zyk',
'val':'CJFDTOTAL',
'p':'0',
}

def get_content(name, min_total=700):
    """Look up an author's CNKI journal-article count and record prolific ones.

    Searches search.cnki.net for ``author:<name>``, scrapes the "学术期刊"
    (academic journal) hit count from the results sidebar, and appends a
    ``name,count`` row to ``author_data.csv`` when the count is at least
    ``min_total``.

    Args:
        name: Author name to search for.
        min_total: Minimum journal-article count required before the author
            is written to the CSV. Defaults to 700, the original
            hard-coded threshold, so existing callers are unaffected.
    """
    print(name)
    # Build per-call params instead of mutating the module-level ``data``
    # dict, so repeated calls cannot interfere with each other.
    params = dict(data, q='author:{}'.format(name))
    time.sleep(1.5)  # throttle: be polite and avoid being blocked
    resp = requests.get(base_url, params=params, headers=header)
    try:
        if resp.status_code == 200:
            doc = pq(resp.text)
            # The second <li> of the first sidebar list carries the per-source
            # hit counts, e.g. ">>学术期刊(1234)".
            total_text = doc('div.side ul').eq(0)('li').eq(1).text().replace(' ', '')
            matches = re.findall(re.compile(r'>>学术期刊(.*)'), str(total_text))
            if not matches:
                # Page layout changed or no journal results — skip this author
                # (previously this raised IndexError and fell into the catch-all).
                return
            total_num = matches[0][1:-1]  # strip the surrounding parentheses
            if int(total_num) >= min_total:
                row = name, total_num
                print(name, total_num)
                # gb18030 so the Chinese names open correctly in CN-locale Excel.
                with open('author_data.csv', 'a', encoding='gb18030', newline='') as f:
                    csv.writer(f).writerow(row)
    except Exception as e:
        # Best-effort scraper: log the failure for this author and move on.
        print(e)


if __name__ == '__main__':
    # Load the author list (one name per line). The slice skips entries
    # already processed on earlier runs.
    with open('学者姓名.csv', 'r') as source:
        author_lines = source.readlines()
    for line in author_lines[128:]:  # 155
        author = line.strip()
        print(author)
        get_content(author)
