import time

from requests.packages import urllib3

from model import Mysqldb
from model import briefingsOfGame
import requests
import urllib
from selenium import webdriver
from bs4 import BeautifulSoup
from lxml import etree
import re
import random
import pandas as pd
from tqdm import tqdm

# Pool of User-Agent strings; one is chosen at random per scraper instance
# to make the requests look less like a single automated client.
_USER_AGENT_STRINGS = (
    'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0',
    'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Mobile Safari/537.36',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1',
    'Mozilla/5.0 (Linux; Android 5.1.1; Nexus 5 Build/LMY48B; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/43.0.2357.65 Mobile Safari/537.36',
    'Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Galaxy Nexus Build/IML74K) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Mobile Safari/535.7',
    'Mozilla/5.0 (Linux; U; Android-4.0.3; en-us; Xoom Build/IML77) AppleWebKit/535.7 (KHTML, like Gecko) CrMo/16.0.912.75 Safari/535.7',
    'Mozilla/5.0 (Linux;u;Android 4.2.2;zh-cn;) AppleWebKit/534.46 (KHTML,like Gecko) Version/5.1 Mobile Safari/10600.6.3',
    'Mozilla/5.0 (iPhone; CPU iPhone OS 10_3 like Mac OS X) AppleWebKit/602.1.50 (KHTML, like Gecko) CriOS/56.0.2924.75 Mobile/14E5239e YisouSpider/5.0 Safari/602.1',
    'Mozilla/5.0 (Linux; Android 4.0; GT-I9300 Build/IMM76D) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/59.0.3071.92',
    'Mozilla/5.0 (Linux; Android 6.0; HUAWEI MLA-AL10 Build/HUAWEIMLA-AL10) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Mobile Safari/537.36',
    'Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36',
    'Mozilla/5.0 (Linux; Android 7.1.1; vivo X20A Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/62.0.3202.84 Mobile Safari/537.36 VivoBrowser/5.6.1.1',
    'Mozilla/5.0 (Linux; U; Android 6.0.1; zh-CN; SM-J7108 Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/11.9.7.977 Mobile Safari/537.36',
    'Mozilla/6.0 (Linux; Android 8.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.183 Mobile Safari/537.36',
)

# Same public shape as before: a list of single-key header dicts, suitable
# for random.choice(...) and for passing as a requests `headers` mapping.
headers = [{'User-Agent': ua} for ua in _USER_AGENT_STRINGS]


class steam_init(object):
    """Loads the Steam tag page for one game category and asks the user
    how many result pages to scrape.

    Parameters
    ----------
    name : str
        URL-encoded tag name (caller quotes it with urllib.parse.quote).
    announce : str
        Section id on the page, e.g. 'NewReleases', 'TopSellers', ...
    """

    def __init__(self, name, announce):
        # Silence urllib3 warnings (e.g. InsecureRequestWarning) globally.
        urllib3.disable_warnings()
        self.url = "https://store.steampowered.com/tags/zh-cn/" + name
        # Pick one random User-Agent for the lifetime of this instance.
        self.headers = random.choice(headers)
        self.announce = announce

    def get_page(self):
        """Prompt the user for the last page to scrape and return it as int.

        Computes the maximum page count from the '<announce>_total' counter
        embedded in the tag page (15 items per page).

        Raises
        ------
        RuntimeError
            If the expected counter span is not found in the response.
        """
        session = requests.session()
        session.keep_alive = False
        # BUG FIX: previously `requests.get(self.url, self.headers)` passed the
        # header dict as the `params` positional argument, so the User-Agent
        # was sent as a query string instead of a header.  Also use the
        # session so `keep_alive = False` actually takes effect.
        response = session.get(self.url, headers=self.headers)
        # Avoid shadowing the builtin `str` (the original did).
        pattern = re.compile(
            '<span id="' + self.announce + '_total">(.*?)</span>')
        matches = pattern.findall(response.text)
        if not matches:
            raise RuntimeError(
                'total counter for "{}" not found on page'.format(self.announce))
        # Counter may contain thousands separators, e.g. "1,234".
        total = int(re.sub(r',', '', matches[0]))
        max_page = total // 15 + 1
        page_num = input("输入想要爬取到第几页，当前商品列最大的页数为{}:  ".format(max_page))
        return int(page_num)


class steam_spider_request():
    """Scrapes paginated Steam content-hub listings for one tag and stores
    each game's name, app id and URL via the project's Mysqldb model.

    Parameters
    ----------
    name : str
        URL-encoded tag name.
    announce : str
        Listing section id ('NewReleases', 'TopSellers', ...).
    page : int
        Last page (inclusive) to scrape.
    startPage : int
        First page to scrape (allows resuming an interrupted run).
    """

    # Hoisted out of the loop: these patterns never change between pages.
    _APP_RE = re.compile('https://store.steampowered.com/app/(.*?)/(.*?)/')
    _HREF_RE = re.compile('href="(.*?)"')

    def __init__(self, name, announce, page, startPage):
        self.headers = random.choice(headers)
        self.name = name
        self.announce = announce
        self.page = page
        self.startPage = startPage

    def start_spider(self):
        """Fetch pages startPage..page, parse app links, persist each batch."""
        srclist = []
        IDlist = []
        namelist = []
        # One session for the whole crawl instead of one per page.
        session = requests.session()
        session.keep_alive = False
        for page in tqdm(range(self.startPage, self.page + 1)):
            url = 'https://store.steampowered.com/contenthub/querypaginated/tags/{0}/render/?query=&start={1}&count=15&cc=CN&l=schinese&v=4&tag={2}' \
                .format(self.announce, page * 15, self.name)
            # BUG FIX: headers were previously passed positionally to
            # requests.get, which treats them as query params; send them as
            # real request headers via the session.
            html = session.get(url, headers=self.headers).text
            # The JSON payload escapes slashes ("\/"); strip backslashes
            # before extracting hrefs.
            cleaned = re.sub(r'\\', '', html)
            for dat in self._HREF_RE.findall(cleaned):
                dat = str(dat)
                # Parse once instead of twice; skip hrefs that are not
                # store app links instead of crashing with IndexError.
                match = self._APP_RE.findall(dat)
                if not match:
                    continue
                srclist.append(dat)
                IDlist.append(match[0][0])
                namelist.append(match[0][1])
            print('已完成{}页的内容'.format(page))
            self.save(srclist, IDlist, namelist)
            srclist.clear()
            IDlist.clear()
            namelist.clear()
            # Random politeness delay (0-5 s) between pages.
            time.sleep(random.random() * 5)

    def save(self, urls, Ids, names):
        """Persist one batch of (name, id, url) records to the database."""
        mysqldb = Mysqldb('cosplaygame')
        for i in range(len(urls)):
            # Row index continues from the current table size.
            game = briefingsOfGame(names[i], Ids[i], urls[i], mysqldb.Get_Count() + 1)
            mysqldb.Store_data(game.ConstructDir())


# srclist, IDlist, namelist = self.get_spider()
# df = pd.DataFrame(list(zip(srclist, IDlist, namelist)),
#                   columns=['链接', 'ID', '游戏名'])
# return df


if __name__ == '__main__':
    # Listing sections available on the Steam tag page, indexed 1..5 by
    # game_anno below.
    num = ['NewReleases', 'TopSellers', 'ConcurrentUsers', 'TopRated', 'ComingSoon']
    # game_type = input('输入想要爬取的游戏类型，例如：动作，射击……  ')
    game_type = "角色扮演"  # hard-coded category: role-playing
    game_type = urllib.parse.quote(game_type)
    # game_anno = int(input('输入查询的货列编号：新品与热门商品1、热销商品2、热门游戏3、最受好评4、即将发行5  '))
    game_anno = 2  # 2 == TopSellers
    # startPage = int(input('请输入开始爬的页数'))
    # Resume point: running total of pages already scraped in earlier runs.
    startPage = 389+30+14+80+5+19+5+136+65+103
    # (removed dead no-op expression statement `1000-81` that had no effect)

    # Fetch the category page and ask how far to scrape.
    steam = steam_init(game_type, num[game_anno - 1])
    page = steam.get_page()

    # Crawl pages startPage..page and store the basic game info.
    spider = steam_spider_request(game_type, num[game_anno - 1], page, startPage)
    spider.start_spider()
