# Scraper for CSSCI journal articles: queries the search API per journal /
# year / page and inserts each article into the MySQL `journal` table.
import json
import math
import urllib.parse

import pymysql
import requests

# NOTE(review): credentials are hard-coded in source — move to env vars/config.
conn = pymysql.connect(
    host='43.248.135.49',  # MySQL server address
    port=3306,  # port
    user='root',  # user name
    passwd='Scool104.',  # password
    db='cssci',  # database name
    charset='utf8',  # connection encoding
)
cur = conn.cursor()  # shared cursor used by id_into_xhr() for inserts

# Current search title (journal name); main() rebinds this before each crawl.
a = ''
ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36'
coo = '_ga=GA1.3.1862801744.1582273520; search_history_yw_a=author%3D%26author_type1%3D0%26author_type2%3D1%26title%3D%25E5%2585%25B3%25E4%25BA%258E%25E6%2595%25B0%25E5%25AD%2597%25E5%259B%25BE%25E4%25B9%25A6%25E9%25A6%2586%25E7%259A%2584%25E5%2587%25A0%25E7%2582%25B9%25E8%25AE%25A4%25E8%25AF%2586%26title_type1%3D1%26ywqk%3D%25E6%2583%2585%25E6%258A%25A5%25E8%25B5%2584%25E6%2596%2599%25E5%25B7%25A5%25E4%25BD%259C%26ywqk_type1%3D1%26ywxj%3D%26ywnd1%3D1998%2C1999%2C2000%2C2001%2C2002%2C2003%2C2004%2C2005%2C2006%2C2007%2C2008%2C2009%2C2010%2C2011%2C2012%2C2013%2C2014%2C2015%2C2016%2C2017%2C2018%2C2019%2C2020%2C2021%26ywlx%3D%u8BF7%u9009%u62E9%26ywnd%3D%26search_model%3DAND%26order_type%3Dnum%26order_px%3DDESC%26pagesize%3D20; PHPSESSID=h60oqg8kkc91qn78v8lkinnot4'

def year_and_papers(url):
    """Query the CSSCI search API for the journal currently in global `a`.

    Performs one GET request against `url` and returns a dict mapping
    year string -> number of articles in that year ({} when the response
    has no usable year data).
    """
    headers = {
        'user-agent': ua,
        'cookie': coo,
    }
    # URL-encode the journal title once here; the site expects the already
    # percent-encoded value to be re-encoded by requests.
    # BUGFIX: the original used str.strip('title='), which strips any of the
    # characters 't','i','l','e','=' from BOTH ends of the encoded string and
    # can corrupt the value; partition removes exactly the 'title=' prefix.
    aa = urllib.parse.urlencode({'title': a}).partition('=')[2]
    params = {
        'control': 'search_base',
        'action': 'search_lysy',
        'title': aa,
        'start_year': '1998',
        'end_year': '2021',
        'pagesize': '20',
        'pagenow': '1',
        'order_type': 'nian',
        'order_px': 'DESC',
        'search_tag': '0',
        'session_key': '519'
    }
    res = requests.get(url, params=params, headers=headers)
    res_json = res.json()
    years_dic = {}
    # 'jl_nan' holds one {'nian': year, 'total': count} entry per year.
    for entry in res_json.get('jl_nan') or []:
        try:
            years_dic[entry['nian']] = int(entry['total'])
        except (KeyError, TypeError, ValueError):
            # Skip a malformed entry instead of aborting the whole parse
            # (the original bare try/except dropped every entry after the
            # first bad one).
            continue
    return years_dic

def request_each_page(url, num, year):
    """Fetch result page `num` for `year` (journal in global `a`).

    Returns the list of article 'sno' ids found on that page; [] when the
    response has no usable contents.
    """
    headers = {
        'user-agent': ua,
        'cookie': coo,
    }
    # BUGFIX: replace str.strip('title=') — it strips the character set
    # {t,i,l,e,=} from both ends and can corrupt the encoded title.
    aa = urllib.parse.urlencode({'title': a}).partition('=')[2]
    params = {
        'control': 'search_base',
        'action': 'search_lysy',
        'title': aa,
        'start_year': '1998',
        'end_year': '2021',
        'nian': year,
        'pagesize': '20',
        'pagenow': str(num),
        'order_type': 'nian',
        'order_px': 'DESC',
        'search_tag': '0',
        'session_key': '519'
    }
    res = requests.get(url, params=params, headers=headers)
    data_lis = res.json().get('contents') or []
    print(data_lis)
    sno_list = []  # renamed from `id` to avoid shadowing the builtin
    for item in data_lis:
        try:
            sno_list.append(item['sno'])
        except (KeyError, TypeError):
            # Skip malformed rows; the original bare except silently dropped
            # every row after the first bad one.
            continue
    return sno_list

# Pass each article id to the detail XHR endpoint and persist the result.
def id_into_xhr(id_lis):
    """For each article sno in `id_lis`, fetch the article detail from the
    CSSCI API and insert one row per returned record into `journal`.

    Uses the module-level `cur`/`conn`; commits after every insert and
    skips rows that fail (e.g. duplicate primary keys).
    """
    article_api = 'http://cssci.nju.edu.cn/control/controllers.php'
    # Hoisted out of the loop: identical for every request.
    headers = {
        'user-agent': ua,
        'cookie': coo,
    }
    # Fields copied verbatim from the API response, in insert-column order.
    fields = (
        'id', 'sno', 'authors', 'authors_address', 'authors_jg', 'blpm',
        'byc', 'juan', 'lypm', 'lypmp', 'nian', 'qi', 'qkmc', 'wzlx',
        'xkdm1', 'xkfl1', 'xmlb', 'yjdm', 'ym',
    )
    # SECURITY FIX: the original built the INSERT by string concatenation,
    # which breaks (and is injectable) on any value containing a quote.
    # Parameterized placeholders let pymysql do the escaping.
    insert_sql = (
        'insert into `journal` (id,sno,authors,authors_address,authors_jg,'
        'blpm,byc,juan,lypm,lypmp,nian,qi,qkmc,wzlx,xkdm1,xkfl1,xmlb,yjdm,'
        'ym,catation,authorsDetail) values ('
        + ','.join(['%s'] * 21) + ');'
    )
    for m in id_lis:
        params = {
            'control': 'search',
            'action': 'source_id',
            'id': str(m)
        }
        res = requests.get(article_api, params=params, headers=headers)
        res_json = res.json()
        # Original round-tripped dumps->loads for no effect; just log it.
        print(json.dumps(res_json, ensure_ascii=False))
        for j in res_json['contents']:
            print(j['id'])
            # catation / authorsDetail are filled by a later pass, hence ''.
            row = [j[f] for f in fields] + ['', '']
            try:
                cur.execute(insert_sql, row)
                conn.commit()
            except Exception:
                # Best effort: duplicates/bad rows are skipped so the crawl
                # keeps going.
                continue
    return

def main():
    """Crawl every journal in the list: fetch its per-year article counts,
    then walk each year page by page and store the articles."""
    api = 'http://cssci.nju.edu.cn/control/controllers.php'
    global a
    journals = ['比较法研究+++8+++AND|||','法学+++8+++AND|||','法律科学(西北政法大学学报)+++8+++AND|||','法商研究+++8+++AND|||','法制与社会发展+++8+++AND|||','国家检察官学院学报+++8+++AND|||','环球法律评论+++8+++AND|||','政法论丛+++8+++AND|||','政法论坛+++8+++AND|||','政治与法律+++8+++AND|||','中国法律评论+++8+++AND|||']
    for journal in journals:
        # The request helpers read the current title from the global `a`.
        a = journal
        # Convert each year's article total into its page count (20 per page).
        pages_per_year = {
            year: math.ceil(total / 20)
            for year, total in year_and_papers(api).items()
        }
        print(pages_per_year)
        for year, pages in pages_per_year.items():
            for page in range(1, pages + 1):
                # Fetch one page of article ids and persist each article.
                id_into_xhr(request_each_page(api, page, year))
    conn.close()

# Run the crawl only when executed as a script, not when imported.
if __name__ == '__main__':
    main()