import os
import requests
from requests.adapters import HTTPAdapter
import openpyxl
from lxml import etree

# HTTP request headers, used to disguise the crawler as a regular browser.
rHeads = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html, application/xhtml+xml, */*',
    'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
    'Accept-Encoding': 'gzip, deflate',
    'User-Agent': 'Mozilla/6.1 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'
}
# Timeout (seconds) and retry settings.
connectTimeout = 10  # connect timeout for list-page requests
readTimeout = 20     # read timeout for every request
picTimeout = 3       # connect timeout for picture downloads
rSession = requests.Session()
rSession.mount('http://', HTTPAdapter(max_retries=3))
rSession.mount('https://', HTTPAdapter(max_retries=3))
# Site root and page range to crawl.
# Example list page: http://www.tgbus.com/gba/screen/gba/61.htm
urlRoot = 'http://www.tgbus.com'
pageStart = 1
# NOTE(review): range(pageStart, pageEnd) stops at page 87 -- confirm
# whether page 88 was meant to be included.
pageEnd = 88

# # Excel file parameters (initialization disabled)
# xFile = 'gba_tgbus.xlsx'
# # Open the Excel file for writing
# xlwb = openpyxl.load_workbook(xFile)
# xlst = xlwb['Sheet1']
# # Assigning a value to an Excel cell
# # for i in range(1,20):
# #     sheetPosts.cell(row=i, column=2).value = 5

# Running total of scraped posts (rows written to the sheet).
postNum = 0

# Directory where all downloaded pictures are collected.
picPath = './tgbus/gba'
# exist_ok=True avoids the check-then-create race of the original
# os.path.exists() + os.makedirs() pair.
os.makedirs(picPath, exist_ok=True)


def page_coding(res_coding):
    """Parse an HTTP response body into an lxml HTML element tree.

    :param res_coding: a requests.Response object
    :return: root element of the parsed document, ready for .xpath() queries
    """
    # The original also set res_coding.encoding = 'utf-8', but that
    # attribute only influences res.text; it is a no-op when parsing
    # res.content (raw bytes), so the dead assignment was removed.
    # Parsing the raw bytes lets lxml honour any charset declared
    # inside the document itself.
    return etree.HTML(res_coding.content)


def x_strip(s):
    """Strip ASCII control characters from *s*, keeping line breaks.

    Characters with code point <= 31 are dropped, except LF (10) and
    CR (13), which are preserved.

    :param s: input string
    :return: the filtered string
    """
    # ''.join over a generator replaces the original quadratic
    # character-by-character += concatenation.
    return ''.join(c for c in s if ord(c) > 31 or ord(c) in (10, 13))


def download_pic(d_url, d_path):
    """Download one picture and write its bytes to *d_path*.

    Network/HTTP errors are printed and swallowed so one bad image does
    not abort the caller's crawl loop.

    :param d_url: absolute URL of the picture
    :param d_path: local file path to save the image to
    """
    try:
        pic_file = requests.get(d_url, headers=rHeads, verify=False,
                                timeout=(picTimeout, readTimeout))
        # Fail before writing, so an HTML error page is never saved
        # to disk disguised as an image file.
        pic_file.raise_for_status()
        # The with-statement guarantees the file handle is closed even
        # if write() raises (the original leaked it in that case).
        with open(d_path, 'wb') as fp:
            fp.write(pic_file.content)
    except requests.RequestException as d_e:
        print(d_e)


# Xpath语法，获取列表
# pageList = root.xpath("//div[@class='pg']/a/@href")
# lastPage = root.xpath("//a[@class='last']/text()")
# postList = root.xpath("//a[@class='s xst']//text()")


def getTgbus():
    """Crawl the tgbus GBA screenshot list pages into Excel + image files.

    For every list page, writes each game's title (column 1) and info
    text (columns 2+) into an Excel sheet, and downloads the two
    screenshot thumbnails into picPath.

    Fixes over the original:
      * ``global postNum`` declared -- ``postNum += 1`` previously raised
        UnboundLocalError because postNum is a module-level counter.
      * The workbook is created/loaded here, because the module-level
        xFile/xlwb/xlst initialization is commented out, which made
        every cell write raise NameError.
    """
    global postNum
    # Excel output: reuse an existing file if present, else start fresh.
    xFile = 'gba_tgbus.xlsx'
    if os.path.exists(xFile):
        xlwb = openpyxl.load_workbook(xFile)
    else:
        xlwb = openpyxl.Workbook()
    xlst = xlwb.active

    # Walk the list pages.
    for pageNum in range(pageStart, pageEnd):
        pageUrl = '{}/gba/screen/gba/{}.htm'.format(urlRoot, pageNum)
        print(pageUrl)
        try:
            res = rSession.get(pageUrl, headers=rHeads, verify=False,
                               timeout=(connectTimeout, readTimeout))
            res.raise_for_status()
            postContents = page_coding(res).xpath("//table[@width='760']//tr")
        except requests.RequestException as e:
            # A failed page is reported and skipped, not fatal.
            print(e)
            continue

        # Each game appears to occupy 4 consecutive <tr> rows:
        # row index % 4 == 1 carries the title,
        # row index % 4 == 2 carries the info text and two thumbnails.
        for iCount, tr in enumerate(postContents):
            if iCount % 4 == 1:
                # Title row: one Excel row per game.
                postNum += 1
                title = tr.xpath("./td/font/text()")
                if title:
                    xlst.cell(row=postNum, column=1).value = title[0].strip()
                    print(title)
            if iCount % 4 == 2:
                # Info row: text fragments go to columns 2+.
                text = tr.xpath("./td[1]/text()")
                colNum = 2
                if text:
                    for t in text:
                        xlst.cell(row=postNum, column=colNum).value = x_strip(t)
                        colNum += 1
                # Thumbnails live in the 2nd and 3rd cells; mirror the
                # site's directory layout under picPath.
                for td_idx in (2, 3):
                    pic = tr.xpath("./td[{}]/img/@src".format(td_idx))
                    if pic:
                        pic1 = pic[0]
                        download_pic(urlRoot + pic1,
                                     pic1.replace('/gba/screen/gba/item/screenshot', picPath))
                        print(pic1)
                print()

        # Persist after every page so a crash loses at most one page.
        xlwb.save(xFile)

def getNetBian():
    """Crawl netbian.com wallpaper list pages 2..29 and download thumbnails.

    Each thumbnail is saved in the current working directory under its
    original file name (the last path segment of the image URL).
    """
    for page_no in range(2, 30):
        pageUrl = 'http://www.netbian.com/meinv/index_{}.htm'.format(page_no)
        # Consistent with getTgbus(): a failed page is reported and
        # skipped instead of aborting the whole crawl (previously any
        # network error or raise_for_status() propagated out).
        try:
            res = rSession.get(pageUrl, headers=rHeads, verify=False,
                               timeout=(connectTimeout, readTimeout))
            res.raise_for_status()
        except requests.RequestException as e:
            print(e)
            continue
        postContents = page_coding(res).xpath("//div[@class='list']//a")
        print('page:', page_no)
        for item in postContents:
            pic = item.xpath("./img/@src")
            if pic:
                picUrl = pic[0]
                # Idiomatic method call instead of str.split(picUrl, '/').
                fileName = picUrl.split('/')[-1]
                download_pic(picUrl, fileName)
                print(fileName)

# Entry-point guard: run the crawl only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    getNetBian()
