import datetime
import time
import json
import pandas as pd
from fake_useragent import UserAgent
from urllib.parse import quote, unquote
from tqdm import tqdm
import random
import requests

# Pool of desktop browser User-Agent strings to rotate through.
USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1464.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.3319.102 Safari/537.36',
    'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0.6',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1468.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2224.3 Safari/537.36',
    'Mozilla/5.0 (X11; CrOS i686 3912.101.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.116 Safari/537.36',
    'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:90.0) Gecko/20100101 Firefox/90.0'
]

# Request headers shared by every Baidu Index call.  The Cookie must come
# from a logged-in Baidu session or the API returns no data.
headers = {
    # Fix: pick from the local UA pool instead of UserAgent(verify_ssl=False).random,
    # which previously left USER_AGENT_LIST unused and fetched UA data over the
    # network at import time (verify_ssl is also gone from modern fake_useragent).
    'User-agent': random.choice(USER_AGENT_LIST),
    'Cookie': 'BIDUPSID=48160DE573EFA9D22F8CF1EE8FCF0FC9; BAIDUID=723424862C2B4FB61A564A8BD5F2E6F5:FG=1; PSTM=1635820768; __yjs_duid=1_573eeffb2f81b6ca3c64474b3e79aa5b1635820814331; BDORZ=FFFB88E999055A3F8A630C64834BD6D0; H_PS_PSSID=35294_34442_35105_31254_35239_35048_35097_34584_34504_35347_35245_35137_35329_35319_26350_35145_35301; delPer=0; PSINO=6; BDRCVFR[Hp1ap0hMjsC]=mk3SLVN4HKm; BA_HECTOR=85800k80812k0l0hst1gqdi2b0r; BCLID=10419072851419617101; BDSFRCVID=L_IOJexroG0zBOoH6uQ3UZE2NopWxY5TDYrELPfiaimDVu-VJeC6EG0Pts1-dEu-EHtdogKKKgOTHIDF_2uxOjjg8UtVJeC6EG0Ptf8g0M5; H_BDCLCKID_SF=tR30WJbHMTrDHJTg5DTjhPrMXbnAWMT-MTryKKJL0q7oOKQG0jblWxQXXxnjLbvkJGnRh4oNBUJtjJjYhfO45DuZyxomtfQxtNRJQKDE5p5hKq5S5-OobUPUyUJ9LUvA3gcdot5yBbc8eIna5hjkbfJBQttjQn3hfIkj2CKLK-oj-D8we5DK3e; RT="z=1&dm=baidu.com&si=frr70oibv6&ss=kwmtjsrg&sl=a&tt=7f2&bcn=https%3A%2F%2Ffclog.baidu.com%2Flog%2Fweirwood%3Ftype%3Dperf"; Hm_lvt_d101ea4d2a5c67dab98251f0b5de24dc=1638320206; Hm_lpvt_d101ea4d2a5c67dab98251f0b5de24dc=1638320558; BDUSS=hXfml5QUUtLWdyUUprem55bX5rcFpJQzctRWZYV1BJT1ltNX55RUo2MlhWYzVoSVFBQUFBJCQAAAAAAAAAAAEAAABc7HISMjk2ODE1MTgwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJfIpmGXyKZhc1; CHKFORREG=1a6a4f90ba99e5681ac5548fa8f7b7dd; bdindexid=s491pgiidofolv8dur65g64c33; ab_sr=1.0.1_Yzg4YWI4NzdlNmFjMDE0Mjk0OGFmMGI1YzRjMDIzY2MyOGNlMWViM2FiNDRhZWQxYjdiNWU0YmVlNDAxNTJkZDE1NDRlZGMyOGM2MDE2YzZiOGI0MWUwZjM3ZDA5NmQ1ODRmMDA2ZmE4N2NmZWQ1OTc1YmY3N2FhMWI3NDg0OWEyMDVlNTczYTM0M2MxMjc1MGFmZDlkZWNjZDhiMWU3Mg==; __yjs_st=2_ZTEyNjI1NGY0OTkzZjdhZjM0OGVmY2I5ZTUyMjhlYTRmMWNjN2IzZDdjMjY3ZDc3YzUwYzE3OWYwOGQ4ZTJlNmNiNGI0NWY0ZWFkMTU4MmQ5NTVlYTkzNTc5YWQ1Nzg0NDZjYWY2Mzc2ZjNmOGI0YzNjN2MwZjEyOTFlYWEyMDJlZmE2NWVjOGZiNGM0ZDNkNTYyN2E4OTg4NDY3MWEyODk1MDFjOWQyNjBjNDBkNWJhOTI0MDI3Mzk0NWE0ZTM2YWY5MjEwNTVkMTcwYmY5ZmJmZWZkOWQ4ZDM2YzAwZmNkN2EyNmJjNmMzZGNlM2Q1Y2ZjZjQyNTFlODU3OGU3MmUzYmU5MzQxMjAwOTdmNTY0OTc5ZDBiNWNjZjhmYzAwXzdfODcwNzQ1MDQ=',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
}

from sqlalchemy import create_engine

# NOTE(review): database credentials are hardcoded in source — move them to
# environment variables or a config file before sharing this script.
# Old connection strings kept for reference:
# engine = create_engine("mysql+pymysql://fecy:Jili@123@172.23.135.5:3306/scrapy")
# engine = create_engine("mysql+pymysql://hsw:65297122he@127.0.0.1:3306/scrapy?charset=utf8")
# engine2: `scrapy` DB — source of the keyword lists read in __main__.
engine2 = create_engine("mysql+pymysql://cl007:ChengLei-0711@127.0.0.1:3306/scrapy?charset=utf8")
# engine: `test` DB — destination for all scraped result tables.
engine = create_engine("mysql+pymysql://cl007:ChengLei-0711@127.0.0.1:3306/test?charset=utf8")
# Root of the Baidu Index API.  NOTE(review): plain http here, while the
# region/social endpoints below hit https://index.baidu.com directly — confirm
# whether http still redirects cleanly.
base_url = 'http://index.baidu.com/'
# Baidu region code -> province name; 0 means nationwide ("全国").
CODE2PROVINCE = {
    901: "山东", 902: "贵州", 903: "江西", 904: "重庆", 905: "内蒙古",
    906: "湖北", 907: "辽宁", 908: "湖南", 909: "福建", 910: "上海",
    911: "北京", 912: "广西", 913: "广东", 914: "四川", 915: "云南",
    916: "江苏", 917: "浙江", 918: "青海", 919: "宁夏", 920: "河北",
    921: "黑龙江", 922: "吉林", 923: "天津", 924: "陕西", 925: "甘肃",
    926: "新疆", 927: "河南", 928: "安徽", 929: "山西", 930: "海南",
    931: "台湾", 932: "西藏", 933: "香港", 934: "澳门", 0: "全国"
}
# Baidu province code -> greater geographic area (华东/西南/...).
# Note: no entry for 0 (nationwide) — lookups must use real province codes.
CODE2AREA = {
    901: "华东", 902: "西南", 903: "华中", 904: "西南", 905: "华北",
    906: "华中", 907: "东北", 908: "华中", 909: "华东", 910: "华东",
    911: "华北", 912: "华南", 913: "华南", 914: "西南", 915: "西南",
    916: "华东", 917: "华东", 918: "西北", 919: "西北", 920: "华北",
    921: "东北", 922: "东北", 923: "华北", 924: "西北", 925: "西北",
    926: "西北", 927: "华中", 928: "华东", 929: "华北", 930: "华南",
    931: "华南", 932: "西南", 933: "华南", 934: "华南",
}
# Baidu city code -> city name, used to label the per-city region data.
# The code space is sparse (gaps are codes Baidu does not use).
CODE2CITY = {
    1: "济南", 2: "贵阳", 3: "黔南", 4: "六盘水", 5: "南昌",
    6: "九江", 7: "鹰潭", 8: "抚州", 9: "上饶", 10: "赣州",
    11: "重庆", 13: "包头", 14: "鄂尔多斯", 15: "巴彦淖尔", 16: "乌海",
    17: "阿拉善盟", 19: "锡林郭勒盟", 20: "呼和浩特", 21: "赤峰", 22: "通辽",
    25: "呼伦贝尔", 28: "武汉", 29: "大连", 30: "黄石", 31: "荆州",
    32: "襄阳", 33: "黄冈", 34: "荆门", 35: "宜昌", 36: "十堰",
    37: "随州", 38: "恩施", 39: "鄂州", 40: "咸宁", 41: "孝感",
    42: "仙桃", 43: "长沙", 44: "岳阳", 45: "衡阳", 46: "株洲",
    47: "湘潭", 48: "益阳", 49: "郴州", 50: "福州", 51: "莆田",
    52: "三明", 53: "龙岩", 54: "厦门", 55: "泉州", 56: "漳州",
    57: "上海", 59: "遵义", 61: "黔东南", 65: "湘西", 66: "娄底",
    67: "怀化", 68: "常德", 73: "天门", 74: "潜江", 76: "滨州",
    77: "青岛", 78: "烟台", 79: "临沂", 80: "潍坊", 81: "淄博",
    82: "东营", 83: "聊城", 84: "菏泽", 85: "枣庄", 86: "德州",
    87: "宁德", 88: "威海", 89: "柳州", 90: "南宁", 91: "桂林",
    92: "贺州", 93: "贵港", 94: "深圳", 95: "广州", 96: "宜宾",
    97: "成都", 98: "绵阳", 99: "广元", 100: "遂宁", 101: "巴中",
    102: "内江", 103: "泸州", 104: "南充", 106: "德阳", 107: "乐山",
    108: "广安", 109: "资阳", 111: "自贡", 112: "攀枝花", 113: "达州",
    114: "雅安", 115: "吉安", 117: "昆明", 118: "玉林", 119: "河池",
    123: "玉溪", 124: "楚雄", 125: "南京", 126: "苏州", 127: "无锡",
    128: "北海", 129: "钦州", 130: "防城港", 131: "百色", 132: "梧州",
    133: "东莞", 134: "丽水", 135: "金华", 136: "萍乡", 137: "景德镇",
    138: "杭州", 139: "西宁", 140: "银川", 141: "石家庄", 143: "衡水",
    144: "张家口", 145: "承德", 146: "秦皇岛", 147: "廊坊", 148: "沧州",
    149: "温州", 150: "沈阳", 151: "盘锦", 152: "哈尔滨", 153: "大庆",
    154: "长春", 155: "四平", 156: "连云港", 157: "淮安", 158: "扬州",
    159: "泰州", 160: "盐城", 161: "徐州", 162: "常州", 163: "南通",
    164: "天津", 165: "西安", 166: "兰州", 168: "郑州", 169: "镇江",
    172: "宿迁", 173: "铜陵", 174: "黄山", 175: "池州", 176: "宣城",
    177: "巢湖", 178: "淮南", 179: "宿州", 181: "六安", 182: "滁州",
    183: "淮北", 184: "阜阳", 185: "马鞍山", 186: "安庆", 187: "蚌埠",
    188: "芜湖", 189: "合肥", 191: "辽源", 194: "松原", 195: "云浮",
    196: "佛山", 197: "湛江", 198: "江门", 199: "惠州", 200: "珠海",
    201: "韶关", 202: "阳江", 203: "茂名", 204: "潮州", 205: "揭阳",
    207: "中山", 208: "清远", 209: "肇庆", 210: "河源", 211: "梅州",
    212: "汕头", 213: "汕尾", 215: "鞍山", 216: "朝阳", 217: "锦州",
    218: "铁岭", 219: "丹东", 220: "本溪", 221: "营口", 222: "抚顺",
    223: "阜新", 224: "辽阳", 225: "葫芦岛", 226: "张家界", 227: "大同",
    228: "长治", 229: "忻州", 230: "晋中", 231: "太原", 232: "临汾",
    233: "运城", 234: "晋城", 235: "朔州", 236: "阳泉", 237: "吕梁",
    239: "海口", 241: "万宁", 242: "琼海", 243: "三亚", 244: "儋州",
    246: "新余", 253: "南平", 256: "宜春", 259: "保定", 261: "唐山",
    262: "南阳", 263: "新乡", 264: "开封", 265: "焦作", 266: "平顶山",
    268: "许昌", 269: "永州", 270: "吉林", 271: "铜川", 272: "安康",
    273: "宝鸡", 274: "商洛", 275: "渭南", 276: "汉中", 277: "咸阳",
    278: "榆林", 280: "石河子", 281: "庆阳", 282: "定西", 283: "武威",
    284: "酒泉", 285: "张掖", 286: "嘉峪关", 287: "台州", 288: "衢州",
    289: "宁波", 291: "眉山", 292: "邯郸", 293: "邢台", 295: "伊春",
    297: "大兴安岭", 300: "黑河", 301: "鹤岗", 302: "七台河", 303: "绍兴",
    304: "嘉兴", 305: "湖州", 306: "舟山", 307: "平凉", 308: "天水",
    309: "白银", 310: "吐鲁番", 311: "昌吉", 312: "哈密", 315: "阿克苏",
    317: "克拉玛依", 318: "博尔塔拉", 319: "齐齐哈尔", 320: "佳木斯", 322: "牡丹江",
    323: "鸡西", 324: "绥化", 331: "乌兰察布", 333: "兴安盟", 334: "大理",
    335: "昭通", 337: "红河", 339: "曲靖", 342: "丽江", 343: "金昌",
    344: "陇南", 346: "临夏", 350: "临沧", 352: "济宁", 353: "泰安",
    356: "莱芜", 359: "双鸭山", 366: "日照", 370: "安阳", 371: "驻马店",
    373: "信阳", 374: "鹤壁", 375: "周口", 376: "商丘", 378: "洛阳",
    379: "漯河", 380: "濮阳", 381: "三门峡", 383: "阿勒泰", 384: "喀什",
    386: "和田", 391: "亳州", 395: "吴忠", 396: "固原", 401: "延安",
    405: "邵阳", 407: "通化", 408: "白山", 410: "白城", 417: "甘孜",
    422: "铜仁", 424: "安顺", 426: "毕节", 437: "文山", 438: "保山",
    456: "东方", 457: "阿坝", 466: "拉萨", 467: "乌鲁木齐", 472: "石嘴山",
    479: "凉山", 480: "中卫", 499: "巴音郭楞", 506: "来宾", 514: "北京",
    516: "日喀则", 520: "伊犁", 525: "延边", 563: "塔城", 582: "五指山",
    588: "黔西南", 608: "海西", 652: "海东", 653: "克孜勒苏柯尔克孜", 654: "天门仙桃",
    655: "那曲", 656: "林芝", 657: "None", 658: "防城", 659: "玉树",
    660: "伊犁哈萨克", 661: "五家渠", 662: "思茅", 663: "香港", 664: "澳门",
    665: "崇左", 666: "普洱", 667: "济源", 668: "西双版纳", 669: "德宏",
    670: "文昌", 671: "怒江", 672: "迪庆", 673: "甘南", 674: "陵水黎族自治县",
    675: "澄迈县", 676: "海南", 677: "山南", 678: "昌都", 679: "乐东黎族自治县",
    680: "临高县", 681: "定安县", 682: "海北", 683: "昌江黎族自治县", 684: "屯昌县",
    685: "黄南", 686: "保亭黎族苗族自治县", 687: "神农架", 688: "果洛", 689: "白沙黎族自治县",
    690: "琼中黎族苗族自治县", 691: "阿里", 692: "阿拉尔", 693: "图木舒克",
}

# Lookup tables derived from CODE2PROVINCE:
#   r_codes maps province name -> region-code string ('山东' -> '901'),
#   codes maps region-code string -> province name ('901' -> '山东').
r_codes = {prov_name: str(region_code) for region_code, prov_name in CODE2PROVINCE.items()}
codes = {str(region_code): prov_name for region_code, prov_name in CODE2PROVINCE.items()}


class BaiduindexCore():
    def decrypt(self, t, e):
        n, i, a, result = list(t), list(e), {}, []
        ln = int(len(n) / 2)
        start, end = n[ln:], n[:ln]
        a = dict(zip(end, start))
        return ''.join([a[j] for j in e])

    def get_ptbk(self, uniqid):
        url = base_url + 'Interface/ptbk?uniqid=%s' % uniqid
        res = requests.get(url, headers=headers)
        if res.status_code == 200:
            ptbk = res.json()['data']
            return ptbk
        else:
            print('uniqid获取失败~状态码为：%s' % res.status_code)
            return None

    def parse_date(self, start, end):
        START = datetime.datetime.strptime(start, '%Y-%m-%d')
        END = datetime.datetime.strptime(end, '%Y-%m-%d')
        BEGIN = datetime.datetime.strptime('2011-01-01', '%Y-%m-%d')  # 百度指数能查到的最早日期
        LAST = datetime.datetime.today() - datetime.timedelta(days=1)  # 百度指数能查到的最晚日期
        if START < BEGIN: START = BEGIN
        if END > LAST: END = LAST
        delta_days = (END - START).days
        if delta_days < 0:
            print('开始日期不能晚于结束日期！')
            return None
        START_STR = START.strftime('%Y-%m-%d')
        BATCH = 360  # 每次提取BATCH天数据，若查询的时间范围超过一年，则百度指数返回以周为周期的数据
        retVal = []
        curDate = END
        curPreDate = curDate - datetime.timedelta(days=BATCH)
        curPreDate_str = curPreDate.strftime('%Y-%m-%d')
        curDate_str = curDate.strftime('%Y-%m-%d')
        retVal.append([START_STR if curPreDate < START else curPreDate_str, curDate_str])
        curDate = curPreDate - datetime.timedelta(days=1)
        while curDate > START:
            curPreDate = curDate - datetime.timedelta(days=BATCH)
            curPreDate_str = curPreDate.strftime('%Y-%m-%d')
            curDate_str = curDate.strftime('%Y-%m-%d')
            retVal.append([START_STR if curPreDate < START else curPreDate_str, curDate_str])
            curDate = curPreDate - datetime.timedelta(days=1)
        retVal = retVal[::-1]  # 逆序转换
        return retVal

    def Baidu_index(self, keyword, start, end, area=0):
        area = str(area)
        if not area.isdigit():
            if area not in r_codes.keys():
                print('区域编码文件中无%s的编码~' % area)
                return None
            else:
                code = r_codes[area]
        else:
            if area not in codes.keys():
                print('区域编码文件中无编码:%s' % area)
                return
            else:
                code = area
        dates = self.parse_date(start, end)
        if not dates: return None
        retVal = {'period': [dates[0][0], dates[-1][1]], 'all': [], 'pc': [], 'mobile': []}
        for date in dates:
            print('正在查询%s%s至%s的数据' % (codes[code], date[0], date[1]))
            keywordd = quote(keyword)
            url = base_url + 'api/SearchApi/index?area={0}&word=[[%7B%22name%22:%22{1}%22,%22wordType%22:1%7D]]&startDate={2}&endDate={3}'.format(
                code, keywordd, date[0], date[1])
            print('url:', url)
            res = requests.get(url, headers=headers)
            print("res:", res.text)
            if not dates: return None
            if res.status_code == 200:
                data = res.json()['data']
                print("data", data)
                all_data = data['userIndexes'][0]['all']['data']
                pc_data = data['userIndexes'][0]['pc']['data']
                mobile_data = data['userIndexes'][0]['wise']['data']
                uniqid = data['uniqid']
                ptbk = self.get_ptbk(uniqid)
                retVal['all'] += [int(x) if x else 0 for x in self.decrypt(ptbk, all_data).split(',')]
                retVal['pc'] += [int(x) if x else 0 for x in self.decrypt(ptbk, pc_data).split(',')]
                retVal['mobile'] += [int(x) if x else 0 for x in self.decrypt(ptbk, mobile_data).split(',')]

            else:
                print('数据获取失败~状态码为：%s' % res.status_code)
                return None
            time.sleep(3.5)
        return retVal

    def Baidu_FeedSearch(self, keyword, start, end, area=0):
        area = str(area)
        if not area.isdigit():
            if area not in r_codes.keys():
                print('区域编码文件中无%s的编码~' % area)
                return None
            else:
                code = r_codes[area]
        else:
            if area not in codes.keys():
                print('区域编码文件中无编码:%s' % area)
                return
            else:
                code = area
        dates = self.parse_date(start, end)
        if not dates: return None
        retVal = {'period': [dates[0][0], dates[-1][1]], 'all': []}
        for date in dates:
            print('正在查询%s%s至%s的数据' % (codes[code], date[0], date[1]))
            url = base_url + 'api/FeedSearchApi/getFeedIndex?area={0}&word=[[%7B%22name%22:%22{1}%22,%22wordType%22:1%7D]]&startDate={2}&endDate={3}'.format(
                code, keyword, date[0], date[1])
            print('url:', url)
            res = requests.get(url, headers=headers)
            print("res:", res.text)
            if res.status_code == 200:
                data = res.json()['data']
                data1 = data['index'][0]['data']
                uniqid = data['uniqid']
                ptbk = self.get_ptbk(uniqid)
                data3 = self.decrypt(ptbk, data1).split(',')
                lenths = len(pd.date_range(date[0], date[1], freq='D'))
                if len(data3) == lenths:
                    retVal['all'] += [int(x) if x else 0 for x in data3]
                else:
                    retVal['all'] += [0 for x in range(lenths)]
            else:
                print('数据获取失败~状态码为：%s' % res.status_code)
                return None
            time.sleep(3.5)
        return retVal

    def Baidu_news(self, keyword, area=0):
        area = str(area)
        if not area.isdigit():
            if area not in r_codes.keys():
                print('区域编码文件中无%s的编码~' % area)
                return None
            else:
                code = r_codes[area]
        else:
            if area not in codes.keys():
                print('区域编码文件中无编码:%s' % area)
                return
            else:
                code = area
        url = base_url + 'api/FeedSearchApi/getFeedIndex?area=%s&word=[["name":"%s","wordType":1]]' % (
            code, keyword)
        res = requests.get(url, headers=headers)
        retVal = []
        if res.status_code == 200:
            data = res.json()['data']
            data1 = data['index'][0]['data']
            startDate = data['index'][0]['startDate']
            endDate = data['index'][0]['endDate']
            uniqid = data['uniqid']
            ptbk = self.get_ptbk(uniqid)
            retVal += [int(x) if x else 0 for x in self.decrypt(ptbk, data1).split(',')]
        else:
            print('数据获取失败~状态码为：%s' % res.status_code)
            return None
        time.sleep(3.5)
        return retVal, startDate, endDate

    def get_origin(self, key, start, end, area=0, type='day'):
        url = 'https://index.baidu.com/api/SearchApi/region?region=0&word={0}&startDate={1}&endDate={2}&days='.format(
            key, start, end)
        print(url)
        res = requests.get(url, headers=headers)
        print(res.text)
        if res.status_code == 200:
            data = res.json()['data']
            print(data)
            prov = data['region'][0]['prov']
            city = data['region'][0]["city"]
        provs = []
        citys = []
        for key, values in prov.items():
            areaname = CODE2AREA[int(key)]
            provname = CODE2PROVINCE[int(key)]
            provs.append(
                {'searchname': name, 'areaname': areaname, 'provname': provname, 'provid': key, 'keyvalue': values,
                 'startdate': start, 'enddate': end, 'scrapydate': datetime.datetime.now()})
        provs = pd.DataFrame(provs)

        for key, values in city.items():
            cityname = CODE2CITY[int(key)]
            citys.append(
                {'searchname': name, 'cityname': cityname, 'cityid': key, 'cityvalue': values, 'startdate': start,
                 'enddate': end, 'scrapydate': datetime.datetime.now()})
        citys = pd.DataFrame(citys)
        return provs, citys

    def get_SocialApi(self, key):
        url = 'https://index.baidu.com/api/SocialApi/baseAttributes?wordlist={0}'.format(key)
        res = requests.get(url, headers=headers)
        if res.status_code == 200:
            data = res.json()['data']
            startDate = data['startDate']
            endDate = data['endDate']
            datas = pd.DataFrame()
            for dataz in data['result']:
                word = dataz['word']
                gender = pd.DataFrame(dataz['gender'])
                gender['type'] = 'gender'
                age = pd.DataFrame(dataz['age'])
                age['type'] = 'age'
                datat = pd.concat([gender, age], axis=0)
                datat['word'] = word
                datat['startDate'] = startDate
                datat['endDate'] = endDate
                datat['searchwords'] = key
                datas = pd.concat([datat, datas], axis=0)
            return datas

    def get_interest(self, key, typeid=''):
        url = 'https://index.baidu.com/api/SocialApi/interest?wordlist={0}&typeid={1}'.format(key, typeid)
        res = requests.get(url, headers=headers)
        if res.status_code == 200:
            data = res.json()['data']
            startDate = data['startDate']
            endDate = data['endDate']
            main_inserest = pd.DataFrame()
            for dataz in data['result']:
                word = dataz['word']
                interest = pd.DataFrame(dataz['interest'])
                interest['word'] = word
                interest['startDate'] = startDate
                interest['endDate'] = endDate
                interest['searchwords'] = key
                main_inserest = pd.concat([interest, main_inserest], axis=0)

        typeids = set(main_inserest['typeId'])
        inserts_des = pd.DataFrame()
        for typeid in tqdm(typeids):
            url = 'https://index.baidu.com/api/SocialApi/interest?wordlist={0}&typeid={1}'.format(key, typeid)
            res = requests.get(url, headers=headers)
            if res.status_code == 200:
                data = res.json()['data']
                startDate = data['startDate']
                endDate = data['endDate']
                for dataz in data['result']:
                    word = dataz['word']
                    interest = pd.DataFrame(dataz['interest'])
                    interest['word'] = word
                    interest['startDate'] = startDate
                    interest['endDate'] = endDate
                    interest['searchwords'] = key
                    interest['mian_typeid'] = typeid
                    inserts_des = pd.concat([inserts_des, interest], axis=0)
        return main_inserest, inserts_des


if __name__ == '__main__':
    # Crawl Baidu Index data for every vehicle name in the scrapy DB over
    # this fixed window, appending results to the `test` DB tables.
    START = '2021-01-01'
    END = '2021-12-19'
    baiduidnex = BaiduindexCore()

    # Keyword sources (only vehNames2 is used below).
    # names = ['AI','特斯拉']   需要爬取的数据
    # vehNames= pd.read_sql('SELECT vehName FROM scrapy.vehnames', engine)['vehName']
    vehNames = pd.read_sql('SELECT vehName FROM scrapy.vehnames', engine2)['vehName']
    vehNames2 = pd.read_sql('SELECT vehname FROM scrapy.vehnames_xiaoxiong_csv', engine2)['vehname']
    brandNamaes = pd.read_sql('SELECT brandName  from  scrapy.brandNames', engine2)['brandName']
    names = vehNames2
    print(names)

    # One row per day in the query window; assumes the scraper returns exactly
    # this many values per series — a mismatch raises and is logged below.
    datalist = pd.date_range(START, END, freq='D')

    for name in names:
        print(name)
        now_time = datetime.datetime.now()
        # Baidu search index (all / pc / mobile daily series).
        # bug fix throughout: bare `except:` also swallowed KeyboardInterrupt
        # and SystemExit and hid the actual error — narrowed to Exception and
        # the error is now printed.
        try:
            data1 = baiduidnex.Baidu_index(name, START, END, '0')
            datax = pd.DataFrame(
                {'date': datalist, 'search_name': name, 'allx': data1['all'], 'pc': data1['pc'],
                 'mobile': data1['mobile'],
                 'scrapy_date': now_time})
            datax.to_sql('baidu_search_index', engine, if_exists='append', index=False)
            print("baidu_search_index success", name)
        except Exception as e:
            print("baidu_search_index failed", name, e)
            time.sleep(3)
        # Baidu feed (news/资讯) index.
        try:
            retVal = baiduidnex.Baidu_FeedSearch(name, START, END, '0')
            datay = pd.DataFrame(
                {'date': datalist, 'search_name': name, 'allx': retVal['all'], 'scrapy_date': now_time})
            datay.to_sql('baidu_search_feed', engine, if_exists='append', index=False)
            print("baidu_search_feed success", name)
        except Exception as e:
            time.sleep(3)
            print("baidu_search_feed failed", name, e)
        # Regional (province/city) distribution.
        try:
            prov, city = baiduidnex.get_origin(name, START, END)
            prov.to_sql('baidu_prov', engine, if_exists='append', index=False)
            city.to_sql('baidu_city', engine, if_exists='append', index=False)
        except Exception as e:
            print("baidu_prov/baidu_city failed", name, e)
            time.sleep(1)
        # Audience demographics and interest profiles.
        try:
            datas = baiduidnex.get_SocialApi(name)
            datas.to_sql('baidu_Social', engine, if_exists='append', index=False)
            inserts, inserts_des = baiduidnex.get_interest(name)
            inserts.to_sql('baidu_inserts', engine, if_exists='append', index=False)
            inserts_des.to_sql('baidu_inserts_des', engine, if_exists='append', index=False)
        except Exception as e:
            print("baidu_Social/baidu_inserts failed", name, e)
            time.sleep(1)
