import requests
from lxml import etree
import csv

# Site roots: bgg_url is the bare host, base_url is the paginated
# "browse boardgame" ranking listing (page number gets appended).
bgg_url = 'https://www.boardgamegeek.com'
base_url = "https://www.boardgamegeek.com/browse/boardgame/page/"

# Browser-like request headers, pasted verbatim from devtools and parsed
# into a dict below so the scrape looks like an ordinary Chrome visit.
rawheaders = """accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9
accept-encoding: gzip, deflate, br
accept-language: zh-CN,zh;q=0.9,en-GB;q=0.8,en;q=0.7
referer: https://www.boardgamegeek.com/
sec-fetch-dest: document
sec-fetch-site: same-origin
upgrade-insecure-requests: 1
user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"""

# One "name: value" pair per line; the [1:] drops the single space
# that follows the colon in each pasted header line.
headers = {}
for line in rawheaders.split('\n'):
    key, _, value = line.partition(':')
    headers[key] = value[1:]

def spider(wr, pages=2):
    """Scrape the BGG "browse boardgame" ranking pages and write one CSV
    row per game to `wr`.

    Each row is [bg_id, name, introduction, geekRating, avgRating, numVoters].

    Args:
        wr: a csv.writer-like object exposing writerow(list).
        pages: how many ranking pages to fetch, starting at page 1.
            Defaults to 2, matching the original hard-coded range(1, 3).
    """
    def _cell_text(node, path):
        # First text match for `path`, stripped of the tab/newline padding
        # BGG wraps around table-cell text; '' when the cell is empty.
        found = node.xpath(path)
        return found[0].strip() if found else ''

    for page_no in range(1, pages + 1):
        main_url = base_url + str(page_no)
        main_page = requests.get(main_url, headers=headers)
        # Debug dump of the most recently fetched page
        # (deliberately overwritten on every iteration).
        with open('test.html', 'w', encoding='utf-8') as file:
            file.write(main_page.text)
        tree = etree.HTML(main_page.text)
        # Every game row on the ranking page carries id="row_".
        for item in tree.xpath('//*[@id="row_"]'):
            name = item.xpath('td[3]/div[2]/a/text()')[0]
            sub_url = item.xpath('td[3]/div[2]/a/@href')[0]
            # Links look like /boardgame/<id>/<slug>, so the numeric id
            # is the second-to-last path segment.
            bg_id = sub_url.split('/')[-2]
            introduction = item.xpath('td[3]/p/text()')
            if introduction:
                # Remove ALL embedded tabs/newlines, not just the ends,
                # so the blurb stays on one CSV line.
                introduction = (introduction[0]
                                .replace('\t', '')
                                .replace('\n', '')
                                .replace('\r', ''))
            else:
                introduction = " "
            # BUGFIX: the xpath lists were previously written as-is, so the
            # CSV contained Python list reprs; extract the cleaned text.
            geekRating = _cell_text(item, 'td[4]/text()')
            avgRating = _cell_text(item, 'td[5]/text()')
            numVoters = _cell_text(item, 'td[6]/text()')
            wr.writerow([bg_id, name, introduction, geekRating, avgRating, numVoters])
        print(page_no, "done")

if __name__ == '__main__':
    # Collect every scraped row into t.csv; newline='' is required by the
    # csv module so it controls line endings itself.
    with open("t.csv", "w", encoding='utf-8', newline='') as csvf:
        spider(csv.writer(csvf))



