import pyodbc
import time
import requests
import random
import re
from fake_useragent import UserAgent
location = r"D:\python_project\fake_useragent.json"
UA = UserAgent(verify_ssl=False, path=location)
from html import unescape
def getHeaders():
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "close",
        "Cookie": "__yjs_duid=1_0b47b5e701566c9f8e6734557b7a013b1635408614778; BIDUPSID=75B22D5B29FD0EC30F510FA1F0DB24F2; PSTM=1636075901; BDUSS=3Z2bDhZY1d4R1ZOdkxMS2hWZVZCRVpKeTdUcUlkSHRrMm51UktzN0cxNGpxNkZpRVFBQUFBJCQAAAAAAAAAAAEAAAA7IkplAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACMeemIjHnpic; BDUSS_BFESS=3Z2bDhZY1d4R1ZOdkxMS2hWZVZCRVpKeTdUcUlkSHRrMm51UktzN0cxNGpxNkZpRVFBQUFBJCQAAAAAAAAAAAEAAAA7IkplAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACMeemIjHnpic; BAIDUID=80618DE526CB7C3D1FD5ADFCA08CB312:FG=1; H_WISE_SIDS=234020_110085_259305_266761_256154_268592_259642_269391_188331_269731_269831_269904_271022_271170_271174_267659_271323_271350_271268_269610_271470_270102_271562_269785_271813_272162_270179_270055_272284_266565_272008_271544_272079_271905_272675_272607_272817_272828_272802; H_WISE_SIDS_BFESS=234020_110085_259305_266761_256154_268592_259642_269391_188331_269731_269831_269904_271022_271170_271174_267659_271323_271350_271268_269610_271470_270102_271562_269785_271813_272162_270179_270055_272284_266565_272008_271544_272079_271905_272675_272607_272817_272828_272802; BDSFRCVID=j8tOJexroG0tfObqKjpKK7hcoOGMFYQTDYLEOwXPsp3LGJLVcCB9EG0Pt_NNvcP-ox_CogKK02OTHz0F_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF=tRk8oI-XJCvDqTrP-trf5DCShUFsW6LLB2Q-XPoO3KJMel7PKfR83ttUjU722MRxBmckbxbgylRp8P3y0bb2DUA1y4vp5MJGfmTxoUJ2XhrFfROMqtnWBUtebPRiJTj9QgbEopQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0hD0wD58BjjPVKgTa54cbb4o2WbCQ2K_28pcN2b5oQT83b4nabxnBQCni_bnJab5vOIJTXpOUWfAkXpJvQnJjt2JxaqRCKhOIbq5jDh3MBUCN5xbTe4ROX27y0hvctIocShnuDMjrDRLbXU6BK5vPbNcZ0l8K3l02V-bIe-t2XjQhDHt8JT08JR3aQ5rtKRTffjrnhPF3h-DvXP6-hnjy3bRJ0q7t-lv8SDjojP7jhU41XtjkQl3RymJ42-39LPO2hpRjyxv4-UtYetoxJpOJX2owLJOxHR7W_pOvbURvD5Dg3-7EJU5dtjTO2bc_5KnlfMQ_bf--QfbQ0hOhqP-jBRIEoC0XtI-BMDvPKITD-tFO5eT22-us2Tnl2hcHMPoosIJmbxnDKfPq3q6it-v2aKTiaKJjBMbUoqRHXnJi0btQDPvxBf7pK27GVl5TtUJM_pr854bmqt4bBPbyKMnitnr9-pnLtpQrh459XP68bTkA5bjZKxtq3mkjbPbDfn028DKuDjtBD53-jHRabK6aKC5bL6rJabC3jCnmXU6q2bDeQN3JKfRa-m3BQpvcJbnjDp7jXfjZ0q0vWtv4WbbvLT7johRTWqR4HpOtefonDh83BPTw-tJTHCOOMhOO5hvvhKoO3M70DMKmDloOW-TB5bbPLUQF5l8-sq0x0bOte-bQXH_E5bj2qRFeoK-b3j; BD_HOME=0; Hm_lvt_f28578486a5410f35e6fbd0da5361e5f=1695264874,1696843654; BD_CK_SAM=1; Hm_lpvt_f28578486a5410f35e6fbd0da5361e5f=1696920436; BDSVRTM=1570; BDRCVFR[feWj1Vr5u3D]=I67x6TjHwwYf0; delPer=0; PSINO=2; ZFY=genjKK5B1YcHoBbVspNfyJLk9juw4picTiAIXvAvYMA:C; BAIDUID_BFESS=80618DE526CB7C3D1FD5ADFCA08CB312:FG=1; BDSFRCVID_BFESS=j8tOJexroG0tfObqKjpKK7hcoOGMFYQTDYLEOwXPsp3LGJLVcCB9EG0Pt_NNvcP-ox_CogKK02OTHz0F_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF_BFESS=tRk8oI-XJCvDqTrP-trf5DCShUFsW6LLB2Q-XPoO3KJMel7PKfR83ttUjU722MRxBmckbxbgylRp8P3y0bb2DUA1y4vp5MJGfmTxoUJ2XhrFfROMqtnWBUtebPRiJTj9QgbEopQ7tt5W8ncFbT7l5hKpbt-q0x-jLTnhVn0MBCK0hD0wD58BjjPVKgTa54cbb4o2WbCQ2K_28pcN2b5oQT83b4nabxnBQCni_bnJab5vOIJTXpOUWfAkXpJvQnJjt2JxaqRCKhOIbq5jDh3MBUCN5xbTe4ROX27y0hvctIocShnuDMjrDRLbXU6BK5vPbNcZ0l8K3l02V-bIe-t2XjQhDHt8JT08JR3aQ5rtKRTffjrnhPF3h-DvXP6-hnjy3bRJ0q7t-lv8SDjojP7jhU41XtjkQl3RymJ42-39LPO2hpRjyxv4-UtYetoxJpOJX2owLJOxHR7W_pOvbURvD5Dg3-7EJU5dtjTO2bc_5KnlfMQ_bf--QfbQ0hOhqP-jBRIEoC0XtI-BMDvPKITD-tFO5eT22-us2Tnl2hcHMPoosIJmbxnDKfPq3q6it-v2aKTiaKJjBMbUoqRHXnJi0btQDPvxBf7pK27GVl5TtUJM_pr854bmqt4bBPbyKMnitnr9-pnLtpQrh459XP68bTkA5bjZKxtq3mkjbPbDfn028DKuDjtBD53-jHRabK6aKC5bL6rJabC3jCnmXU6q2bDeQN3JKfRa-m3BQpvcJbnjDp7jXfjZ0q0vWtv4WbbvLT7johRTWqR4HpOtefonDh83BPTw-tJTHCOOMhOO5hvvhKoO3M70DMKmDloOW-TB5bbPLUQF5l8-sq0x0bOte-bQXH_E5bj2qRFeoK-b3j; H_PS_PSSID=39312_39530_39418_39436_39526_39497_39476_39462_39234_26350_22160; ZD_ENTRY=baidu; ab_sr=1.0.1_ZmFiODZmMWJjMTI5MWNhMjg0MDYzMWExNmY1ZDExYzgwYjZhOTYwOTU4Njk5ZDRkNDg1YjkyZDI4MzY4ZTYxZTFiOTYzNTVlN2U3ODY0MDZhYTljNGNhNzRjY2EzNjk3MDAwNzhjNzgzYWMzOGQzNTJlNWMzNDE3NWNiMzMzZTg3NzFhODQ1ODE4YzE0MDE3YzVlNjUxZGY5NzFiMjdjZQ==; Hm_lvt_483cb5a97cf7a40bc5e4f07bebcb5a14=1696905582; Hm_lpvt_483cb5a97cf7a40bc5e4f07bebcb5a14=1698202587; antispam_data=cdedcf0b813062170aa01de61a7a3b468cf068a34866440e0d3016c884023ffcff976f7303b2540fed28b3dc91bbec11efcd635057e800502cf5ef92bc392f2ccd393b512f694ff7f7c915db44b70564cacbb19e88a9852559d91c1e142d25b7; antispam_key_id=45; antispam_sign=b68dcba4; antispam_site=ae_xueshu_journal_page",
        "Host": "xueshu.baidu.com",
        # "Referer": "https://www.xuexi.cn/f927c245be854d1fab1789ea7e0b40f3/90fcecad01824c42acad7153fc552356.html",
        "User-Agent": UA.random,
        "Upgrade-Insecure-Requests": "1"
    }
    return headers
def read_access():
    sql = "SELECT 刊种地址 FROM book WHERE 状态 = '未处理'"
    info_tuple = cursor.execute(sql).fetchall()
    for info in info_tuple:
        url = info[0]
        getIn(url)
def getIn(url):
    print(url)
    try:
        time.sleep(round(random.uniform(4, 6), 1))
        response = requests.get(url=url, headers=getHeaders(), timeout=60).content.decode('utf-8')
        # print(response)
        """"""
        # 顶部基本信息
        topInfo = re.search('// 顶部基本信息(.*?)// 媒体链接', response, re.S)
        if topInfo != None:
            topInfo = topInfo.group(1).strip()
            if topInfo == 'basicInfo: null,':
                id = ''
                entity_id = ''
                journal_uri = ''
                name = ''
                db_short_name = ''
                cover = ''
                publishing_cycle = ''
                ISSN = ''
                paper_cnt = ''
                cited_cnt = ''
                factor = ''
                used_name = ''
                lang = ''
                supervisor = ''
                organization = ''
                CN = ''
                address = ''
                postal_code = ''
                mail = ''
                db_full_name = ''
                honor = ''
            else:
                if topInfo.endswith(',') == True:
                    topInfo = topInfo[:-1]
                else:
                    topInfo = topInfo
                topInfo = topInfo.replace('basicInfo: ', '')
                topInfo = eval(topInfo)
                # print(type(topInfo))
                print(topInfo)
                if 'id' in topInfo.keys():
                    id = topInfo['id']
                else:
                    id = ''
                if 'entity_id' in topInfo.keys():
                    entity_id = topInfo['entity_id']
                else:
                    entity_id = ''
                if 'journal_uri' in topInfo.keys():
                    journal_uri = topInfo['journal_uri']
                else:
                    journal_uri = ''
                if 'name' in topInfo.keys():
                    name = unescape(topInfo['name']).strip()
                else:
                    name = ''
                # print(id, entity_id, journal_uri, name)
                if 'db_short_name' in topInfo.keys():
                    db_short_name = topInfo['db_short_name']
                    if len(db_short_name) != 0:
                        db_short_name = ';'.join(db_short_name)
                    else:
                        db_short_name = ''
                else:
                    db_short_name = ''
                if 'cover' in topInfo.keys():
                    cover = topInfo['cover'].replace('\/', '/')
                else:
                    cover = ''
                if 'publishing_cycle' in topInfo.keys():
                    publishing_cycle = topInfo['publishing_cycle']
                else:
                    publishing_cycle = ''
                if 'ISSN' in topInfo.keys():
                    ISSN = topInfo['ISSN']
                else:
                    ISSN = ''
                # print(db_short_name, cover, publishing_cycle, ISSN)
                if 'paper_cnt' in topInfo.keys():
                    paper_cnt = topInfo['paper_cnt']
                else:
                    paper_cnt = ''
                if 'cited_cnt' in topInfo.keys():
                    cited_cnt = topInfo['cited_cnt']
                else:
                    cited_cnt = ''
                if 'factor' in topInfo.keys():
                    factor = topInfo['factor']
                else:
                    factor = ''
                if 'used_name' in topInfo.keys():
                    used_name = unescape(topInfo['used_name']).strip()
                else:
                    used_name = ''
                if 'lang' in topInfo.keys():
                    lang = topInfo['lang']
                else:
                    lang = ''
                # print(paper_cnt, cited_cnt, factor, used_name, lang)
                if 'supervisor' in topInfo.keys():
                    supervisor = unescape(topInfo['supervisor']).strip()
                else:
                    supervisor = ''
                if 'organization' in topInfo.keys():
                    organization = unescape(topInfo['organization']).strip()
                else:
                    organization = ''
                if 'CN' in topInfo.keys():
                    CN = topInfo['CN'].replace('\/', '/')
                else:
                    CN = ''
                # print(supervisor, organization, CN)
                if 'address' in topInfo.keys():
                    address = unescape(topInfo['address']).strip()
                else:
                    address = ''
                if 'zip_code' in topInfo.keys():
                    postal_code = topInfo['zip_code']
                else:
                    postal_code = ''
                if 'mail' in topInfo.keys():
                    mail = topInfo['mail']
                else:
                    mail = ''
                # print(address, postal_code, mail)
                if 'db_full_name' in topInfo.keys():
                    db_full_name = topInfo['db_full_name']
                    if len(db_full_name) != 0:
                        db_full_name = '/'.join(db_full_name)
                    else:
                        db_full_name = ''
                else:
                    db_full_name = ''
                if 'honor' in topInfo.keys():
                    honor = topInfo['honor']
                    # print(honor)
                    try:
                        honor = eval(topInfo['honor'])
                        # print(honor)
                        if len(honor) != 0:
                            honor = ';'.join(honor)
                        else:
                            honor = ''
                    except Exception as e:
                        # print(e)
                        honor = honor.replace('[', '').replace(']', '')
                        if honor.startswith('"') == True:
                            honor = honor[1:]
                        else:
                            honor = honor
                        if honor.endswith('"') == True:
                            honor = honor[:-1]
                        else:
                            honor = honor
                else:
                    honor = ''
        else:
            id = ''
            entity_id = ''
            journal_uri = ''
            name = ''
            db_short_name = ''
            cover = ''
            publishing_cycle = ''
            ISSN = ''
            paper_cnt = ''
            cited_cnt = ''
            factor = ''
            used_name = ''
            lang = ''
            supervisor = ''
            organization = ''
            CN = ''
            address = ''
            postal_code = ''
            mail = ''
            db_full_name = ''
            honor = ''
        # 媒体链接
        urlInfo = re.search('// 媒体链接(.*?)scUrlComponent', response, re.S)
        if urlInfo != None:
            urlInfo = urlInfo.group(1).strip()
            if urlInfo == 'mediaInfo: null,':
                taxonomy_1 = ''
                taxonomy_2 = ''
            else:
                if urlInfo.endswith(',') == True:
                    urlInfo = urlInfo[:-1]
                else:
                    urlInfo = urlInfo
                urlInfo = urlInfo.replace('mediaInfo: ', '')
                urlInfo = eval(urlInfo)
                print(urlInfo)
                for media in urlInfo:
                    if media['taxonomy'] == '投稿链接':
                        taxonomy_1 = media['value'].replace('\/', '/')
                    else:
                        pass
                    if media['taxonomy'] == '官方站点':
                        taxonomy_2 = media['value'].replace('\/', '/')
                    else:
                        pass
        else:
            taxonomy_1 = ''
            taxonomy_2 = ''
        save_access(id, entity_id, journal_uri, name, db_short_name, cover, publishing_cycle, ISSN, paper_cnt,
                    cited_cnt, factor, used_name, lang, supervisor, organization, CN, address, postal_code, mail,
                    db_full_name, honor, taxonomy_1, taxonomy_2, url)
        # print(id, entity_id, journal_uri, name)
        # print(db_short_name, cover, publishing_cycle, ISSN)
        # print(paper_cnt, cited_cnt, factor, used_name, lang)
        # print(supervisor, organization, CN)
        # print(address, postal_code, mail)
        # print(db_full_name, honor)
        # print(taxonomy_1, taxonomy_2)

    except Exception as e:
        print(e)
        getIn(url)
def save_access(id, entity_id, journal_uri, name, db_short_name, cover, publishing_cycle, ISSN, paper_cnt, cited_cnt, factor, used_name, lang, supervisor, organization, CN, address, postal_code, mail, db_full_name, honor, taxonomy_1, taxonomy_2, url):
    status = '已下载'
    update_sql = "UPDATE book SET [id]='%s',entity_id='%s',journal_uri='%s',英文刊种名称='%s',标签='%s',封面链接='%s'," \
                 "出版周期='%s',ISSN='%s',CN='%s',发文量='%s',被引量='%s',影响因子='%s',曾用名='%s',语种='%s',主管='%s'," \
                 "主办单位='%s',地址='%s',邮政编码='%s',Email='%s',被收录数据库包含='%s',期刊荣誉='%s',官方站点='%s'," \
                 "投稿链接='%s', 状态='%s' WHERE 刊种地址='%s'" % (id, entity_id, journal_uri, name.replace("'", "''"), db_short_name.replace("'", "''"),
                 cover, publishing_cycle.replace("'", "''"), ISSN, CN, paper_cnt, cited_cnt, factor,
                 used_name.replace("'", "''"), lang, supervisor.replace("'", "''"), organization.replace("'", "''"),
                 address.replace("'", "''"), postal_code, mail, db_full_name.replace("'", "''"), honor.replace("'", "''"),
                 taxonomy_2, taxonomy_1, status, url)
    try:
        cursor.execute(update_sql)
        cursor.commit()
        print(url + '更新完成~~~')
    except Exception as e:
        print(e)
        cursor.rollback()
if __name__ == '__main__':
    access_database_file = r"D:\我的程序\百度学术\期刊名称及刊种地址.mdb"
    conn = pyodbc.connect(r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};DBQ=%s;"' % access_database_file)
    cursor = conn.cursor()
    read_access()
    # getIn('https://xueshu.baidu.com/usercenter/journal/baseinfo?cmd=journal_page&entity_id=1819c2650220c89b70b28aa60dbdda98')
    # print(unescape('\u5b98\u65b9\u7ad9\u70b9'))
    cursor.close()
    conn.close()