import time
from cgitb import html

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import re

from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait

# Request headers preferring Simplified Chinese; only referenced by the
# commented-out requests-based fetch below — the active Selenium path
# never uses it.
headers = {'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3'}

# url = 'http://steamcommunity.com/app/1426210/homecontent/?userreviewsoffset=' + str(10 * (i - 1)) + '&p=' + str(
#     i) + '&workshopitemspage=' + str(i) + '&readytouseitemspage=' + str(i) + '&mtxitemspage=' + str(
#     i) + '&itemspage=' + str(i) + '&screenshotspage=' + str(i) + '&videospage=' + str(i) + '&artpage=' + str(
#     i) + '&allguidepage=' + str(i) + '&webguidepage=' + str(i) + '&integratedguidepage=' + str(
#     i) + '&discussionspage=' + str(
#     i) + '&numperpage=10&browsefilter=toprated&browsefilter=toprated&appid=433850&appHubSubSection=10&l=schinese&filterLanguage=default&searchText=&forceanon=1'
# html = requests.get(url, headers=headers).text
# Scrape user reviews for a Steam game: open the reviews page in Chrome,
# scroll repeatedly so the infinite-scroll list loads more cards, then
# append one cleaned review line per card to a text file.
url = "https://steamcommunity.com/app/1466860/reviews/?browsefilter=trendyear&snr=15_reviews&p=1"
browser = webdriver.Chrome()
try:
    browser.get(url)
    # Each scroll-to-bottom triggers the page to load another batch of
    # review cards; pause briefly so the new content has time to arrive.
    for _ in range(1, 50):
        browser.execute_script("window.scrollTo(0,document.body.scrollHeight)")
        time.sleep(1)
    html = browser.page_source
finally:
    # quit() shuts down the whole driver process; close() only closes the
    # window and would leave chromedriver running. The finally block
    # guarantees cleanup even if navigation or scrolling raises.
    browser.quit()

soup = BeautifulSoup(html, 'html.parser')  # use 'lxml' if installed, it's faster
reviews = soup.find_all('div', {'class': 'apphub_Card'})

# 'with' guarantees the file is flushed and closed even on error.
with open('Age of Empires IV.txt', mode='a+', encoding='utf-8') as file_handle:
    for review in reviews:
        # Cards can lack sub-elements; skip incomplete ones instead of
        # crashing the whole run with AttributeError/IndexError.
        content = review.find('div', {'class': 'apphub_CardTextContent'})
        if content is None:
            continue
        lines = content.text.split('\n')
        if len(lines) < 3:
            continue
        # Line index 2 holds the review body text; strip layout tabs.
        file_handle.write(lines[2].replace('\t', '') + "\n")

print(len(reviews))
