import requests
from threading import Thread
import re
import pymysql
from lxml import html
# Request headers for the LOL (lol.qq.com) endpoints; the Referer is required
# by the apps.game.qq.com API.
headers1 = {
        "Referer": "https://lol.qq.com/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.41"
}
# Session cookie captured from a browser visit to www.dota2.com.cn.
# NOTE(review): this is a hard-coded session value and will expire — refresh it
# when the Dota2 scraper starts failing.
Cookie = '''DOTA2=4c62dde600f99084c49c599fd9ef9fa1; __mtxsr=csr:(direct)|cdt:(direct)|advt:(none)|camp:(none); isLogin=0; __mtxcar=www.baidu.com:/link; puclic_hg_flag2=true; isCover=7ec7577d2603bc50ea688e8dd68937be; DOTA2=20ba7a7722df0937e34e344feb48179b; __mtxud=b5f3e178b34c1cd4.1638586396079.1638597166796.1638600171414.3; Hm_lvt_fa320491e2708f227dc55873588057c5=1638586396,1638597169,1638600603; __mtxsd=52d24ed0.1638600715784.35188.11; Hm_lpvt_fa320491e2708f227dc55873588057c5=1638600716'''
host ='''www.dota2.com.cn'''
# Explicitly disable any system/environment proxies for the Dota2 requests.
proxies = {
    'http': None,
    'https': None
}
# Request headers for the Dota2 (www.dota2.com.cn) pages.
headers2 = {
        "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
        'Cookie': Cookie,
        'Host': host
}


# First page of guides:
# https://apps.game.qq.com/cmc/zmMcnTargetContentList?r0=jsonp&page=1&num=16&target=27&source=web_pc&r1=jQuery191047697746305480315_1638540905194&_=1638540905201
# Second page of guides:
# https://apps.game.qq.com/cmc/zmMcnTargetContentList?r0=jsonp&page=1&num=16&target=25&source=web_pc&r1=jQuery191047697746305480315_1638540905199&_=1638540210238
# Pattern: the `page` parameter changes per page, the `target` parameter selects
# the section. Analysis done!
# LOL module scraper:
# fetches the first pages of [general (23), announcements (24), esports (25), guides (27), community (28)]
def LOL():
    """Scrape LOL news (pages 1-6 of five channels) into the MySQL table `lol`.

    Channels: 23=general, 24=announcements, 25=esports, 27=guides, 28=community.
    Each article is stored as (title, url, date) via REPLACE INTO, so re-runs
    overwrite rather than duplicate rows.
    """
    db = pymysql.Connect(host="localhost", port=3306, user="root", password="123456", db="gamdata", charset="utf8")
    try:
        cursor = db.cursor()
        targets = [23, 24, 25, 27, 28]
        # Compile the JSON-field extraction patterns once instead of per page.
        doc_re = re.compile(r',"iDocID":"(.*?)",')
        time_re = re.compile(r'"sIdxTime":"(.*?)"')
        title_re = re.compile(r'"sTitle":"(.*?)",')
        for page in range(1, 7):
            for target in targets:
                url = ('http://apps.game.qq.com/cmc/zmMcnTargetContentList'
                       '?r0=jsonp&page=' + str(page) + '&num=16&target=' + str(target) +
                       '&source=web_pc&r1=jQuery191047697746305480315_1638540905199&_=1638540210238')
                data = requests.get(url, headers=headers1).text
                # Fields needed: iDocID (builds the article URL),
                # sIdxTime (publish date), sTitle (headline).
                doc_ids = doc_re.findall(data)
                idx_times = time_re.findall(data)
                titles = title_re.findall(data)
                # zip() stops at the shortest list, so a malformed response
                # cannot raise IndexError the way positional indexing would.
                for title, doc_id, idx_time in zip(titles, doc_ids, idx_times):
                    link = 'https://lol.qq.com/news/detail.shtml?docid=' + doc_id
                    # Parameterized query: titles may contain quotes, which
                    # would break (or inject into) a string-formatted statement.
                    cursor.execute('replace into lol values(%s, %s, %s)',
                                   (title, link, idx_time))
                    db.commit()
                    print('存取成功' + title)
    finally:
        db.close()
# First announcements page: https://www.dota2.com.cn/news/gamenews/index1.htm
# Second announcements page: https://www.dota2.com.cn/news/gamenews/index2.htm
def Dota2():
    """Scrape pages 1-5 of Dota2 announcements into the MySQL table `Dota2`.

    Pages are plain HTML, parsed with lxml XPath; each article is stored as
    (title, link, date) via REPLACE INTO.
    """
    db = pymysql.Connect(host="localhost", port=3306, user="root", password="123456", db="gamdata", charset="utf8")
    try:
        cursor = db.cursor()
        # verify=False below would spam InsecureRequestWarning otherwise.
        requests.packages.urllib3.disable_warnings()
        for page in range(1, 6):
            url = 'http://www.dota2.com.cn/news/gamenews/index' + str(page) + '.html'
            data = requests.get(url, headers=headers2, verify=False, proxies=proxies).text
            tree = html.etree.HTML(data)
            links = tree.xpath('//li[@class="pane active"]//a/@href')                                              # article URLs
            titles = tree.xpath('//li[@class="pane active"]//a/div[@class="news_msg"]/h2[@class="title"]/text()')  # article titles
            dates = tree.xpath('//li[@class="pane active"]//a/div[@class="news_msg"]/p[@class="date"]/text()')     # publish dates
            # zip() tolerates length mismatches between the three XPath result
            # lists; parameterized SQL keeps quotes in titles from breaking it.
            for n, (title, link, date) in enumerate(zip(titles, links, dates)):
                cursor.execute('replace into Dota2 values(%s, %s, %s)',
                               (title, link, date))
                db.commit()
                print('存储成功第' + str(n) + "条")
    finally:
        db.close()


# Honor of Kings URL analysis: channel ids 1760 -- 1764 and 1769.
# Each page advances `start` by 12: 0, 12, 24, 36, 48 (multiples of 12).
#     https://apps.game.qq.com/cmc/cross?serviceId=18&filter=channel&sortby=sIdxTime&source=web_pc&limit=12&logic=or&typeids=1&chanid=1760&start=24&withtop=yes&exclusiveChannel=4&exclusiveChannelSign=01f98ef64cdad4b927815cc60ed21884&time=1638606474
# Request headers for the Honor of Kings (pvp.qq.com) API.
headers3 = {
        "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
        # "referer": "https://pvp.qq.com/"
}
def King():
    """Scrape Honor of Kings news (5 pages x 6 channels) into MySQL.

    `start` advances by 12 per page; `chanid` selects the channel.
    Each article is stored in table `lol` as (title, url, date) via
    REPLACE INTO. NOTE(review): the table name `lol` looks reused on
    purpose (shared news table) — confirm against the schema.
    """
    db = pymysql.Connect(host="localhost", port=3306, user="root", password="123456", db="gamdata", charset="utf8")
    try:
        cursor = db.cursor()
        starts = [0, 12, 24, 36, 48]
        channels = [1760, 1761, 1762, 1763, 1764, 1769]
        # Compile extraction patterns once, outside both loops.
        tid_re = re.compile(r'"iId":(.*?),')              # article id -> link
        title_re = re.compile(r'"sTitle":"(.*?)"')
        date_re = re.compile(r'"sTargetIdxTime":"(.*?)"')
        for start in starts:
            for chan in channels:
                url = ('http://apps.game.qq.com/cmc/cross?serviceId=18&filter=channel'
                       '&sortby=sIdxTime&source=web_pc&limit=12&logic=or&typeids=1'
                       '&chanid=' + str(chan) + '&start=' + str(start) +
                       '&withtop=yes&exclusiveChannel=4'
                       '&exclusiveChannelSign=01f98ef64cdad4b927815cc60ed21884&time=1638606474')
                # The endpoint intermittently answers 502 on a cold request:
                # fire a throwaway request first, then read the second one.
                requests.get(url, headers=headers3)
                body = requests.get(url, headers=headers3).text
                tids = tid_re.findall(body)
                titles = title_re.findall(body)
                dates = date_re.findall(body)
                # zip() avoids IndexError on length mismatches; parameterized
                # SQL keeps quotes in titles from breaking the statement.
                for title, tid, date in zip(titles, tids, dates):
                    link = 'https://pvp.qq.com/web201706/newsdetail.shtml?tid=' + tid
                    cursor.execute('replace into lol values(%s, %s, %s)',
                                   (title, link, date))
                    db.commit()
                    print('yes')
    finally:
        db.close()



# https://ys.mihoyo.com/content/ysCn/getContentList?pageSize=5&pageNum=1&channelId=10
# https://ys.mihoyo.com/content/ysCn/getContentList?pageSize=5&pageNum=2&channelId=10
# https://ys.mihoyo.com/content/ysCn/getContentList?pageSize=5&pageNum=3&channelId=258
# Genshin pattern: pageNum is the page number (starting at 1); channelId selects the section (starting at 10; the fourth major module is 258).

# Request headers for the Genshin Impact (ys.mihoyo.com) content-list API.
headersOG = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Safari/537.36 Edg/96.0.1054.41",
        "referer": "https://ys.mihoyo.com/main/news/258"

}
def Origod():
    """Scrape Genshin Impact news (10 pages x 5 channels) into MySQL.

    Each article is stored in table `lol` as (title, url, date) via
    REPLACE INTO. Channels that run out of articles return an empty
    list and are skipped.
    """
    db = pymysql.Connect(host="localhost", port=3306, user="root", password="123456", db="gamdata", charset="utf8")
    try:
        cursor = db.cursor()
        channels = [10, 11, 12, 13, 258]
        # Compile extraction patterns once, outside both loops.
        title_re = re.compile(r'"title":"(.*?)",')
        date_re = re.compile(r'"start_time":"(.*?)",')
        id_re = re.compile(r'"id":"(.*?)"')
        for page in range(1, 11):
            for chan in channels:
                url = ('http://ys.mihoyo.com/content/ysCn/getContentList'
                       '?pageSize=5&pageNum=' + str(page) + '&channelId=' + str(chan))
                body = requests.get(url, headers=headersOG).text
                titles = title_re.findall(body)
                dates = date_re.findall(body)
                ids = id_re.findall(body)
                if not titles:
                    # Channel exhausted on this page — nothing to store.
                    continue
                # zip() avoids IndexError on length mismatches; parameterized
                # SQL keeps quotes in titles from breaking the statement.
                for n, (title, art_id, date) in enumerate(zip(titles, ids, dates)):
                    link = 'https://ys.mihoyo.com/main/news/detail/' + art_id
                    cursor.execute('replace into lol values(%s, %s, %s)',
                                   (title, link, date))
                    db.commit()
                    print("正在保存第"+str(page)+"页,第"+str(chan)+"板块,第"+str(n)+"条")
    finally:
        db.close()




if __name__ == '__main__':
    # Run one scraper at a time; uncomment the others as needed.
    # Origod()
    # King()
    LOL()
    # Dota2()

