import csv
import os

import requests
from bs4 import BeautifulSoup


def request_scrape(url, timeout=10):
    """Fetch *url* and return its HTML body, or None on failure.

    Args:
        url: The page URL to download.
        timeout: Seconds to wait for the server before giving up.
            (Without a timeout, ``requests.get`` can block forever.)

    Returns:
        The response text on HTTP 200, otherwise None. Failures are
        reported on stdout rather than raised, so the caller can skip
        the page and continue.
    """
    headers = {
        # Pretend to be a desktop browser; some sites reject the
        # default python-requests user agent.
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/88.0.4324.146 Safari/537.36',
    }
    try:
        r = requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException as e:
        # Network-level problems (DNS, timeout, refused connection)
        # are handled the same way as bad status codes: log and skip.
        print(f"Failed to retrieve data from {url}. Error: {e}")
        return None
    if r.status_code == 200:
        return r.text
    print(f"Failed to retrieve data from {url}. Status code: {r.status_code}")
    return None


def parse_page(soup):
    """Extract one row of movie data per card on a listing page.

    Args:
        soup: Parsed BeautifulSoup document for one listing page.

    Returns:
        A list of ``[name, image_url, region/duration, score]`` rows,
        one per ``el-card__body`` movie card found on the page.
    """
    result_data = []
    # Each movie is rendered inside an ``el-card__body`` container.
    for card in soup.find_all(class_='el-card__body'):
        # All lookups are scoped to *card*. The previous version
        # searched the whole ``soup`` for img/info/score, which made
        # every row repeat the first movie's data.
        name = card.find('h2', class_='m-b-sm')
        # Guard against a missing title; previously a missing <h2>
        # silently reused the name from the prior iteration.
        result_name = name.text if name else ''
        img = card.find('img', class_='cover')['src']
        info = card.find_all('div', class_='m-v-sm info')[0].text.strip()
        score = card.find('p', class_='score m-t-md m-b-n-sm').text.strip()
        result_data.append([result_name, img, info, score])
    return result_data


def save_to_csv(data, filename):
    """Append movie rows to *filename*, writing the header only once.

    Args:
        data: Iterable of ``[name, image_url, region/duration, score]`` rows.
        filename: Target CSV path; created on first call.
    """
    # Emit the header only when the file is new or empty. The previous
    # version re-wrote the header on every call, so a multi-page run
    # interleaved a header row between each page's data.
    write_header = not os.path.exists(filename) or os.path.getsize(filename) == 0
    with open(filename, 'a', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        if write_header:
            writer.writerow(['名称', '图像', '地区时长', '评分'])
        writer.writerows(data)


def main(page):
    """Scrape one listing page and append its movies to the CSV file."""
    page_url = f'https://ssr1.scrape.center/page/{page}'
    html = request_scrape(page_url)
    # Bail out early if the page could not be fetched.
    if not html:
        return
    document = BeautifulSoup(html, 'lxml')
    rows = parse_page(document)
    if rows:
        save_to_csv(rows, filename='ScrapeT100.csv')


if __name__ == '__main__':
    # Scrape listing pages 1 through 10.
    for page_number in range(1, 11):
        main(page_number)
    print('Done!')
