import requests
import json
import re
import time
import os.path
import csv
from bs4 import BeautifulSoup


# Browser-like request headers sent with every page fetch so that
# maoyan.com serves the normal desktop HTML instead of a bot challenge.
HEADERS = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Content-Type': 'text/plain',
    'Origin': 'https://www.maoyan.com',
    'Referer': 'https://www.maoyan.com/',
    'Sec-Ch-Ua': '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
    'Sec-Ch-Ua-Mobile': '?0',
    'Sec-Ch-Ua-Platform': '"Windows"',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'cross-site',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
}


class GetTop100:
    """Scrape the Maoyan Top-100 movie board and persist the results to CSV.

    Typical usage: call ``get_page_url`` once to build the ten paginated
    board URLs, then for each URL call ``get_one_page_data`` (fills
    ``movie_info``) and ``writer`` (flushes it to disk).
    """

    def __init__(self):
        self.num = 0          # kept for backward compatibility (not used internally)
        self.movie_info = {}  # title -> {'top', 'score', 'img_src', 'actors', 'releasetime'}
        self.page_urls = []   # paginated board URLs produced by get_page_url()

    def get_one_page_data(self, url):
        """Fetch one board page and merge its movies into ``self.movie_info``.

        Parses each ``<dd>`` entry: ranking and the two score fragments come
        from the ``<i>`` tags, title/poster from the board image, actors and
        release date from their respective ``<p>`` tags.
        """
        response = requests.get(url, headers=HEADERS)
        soup = BeautifulSoup(response.text, 'lxml')
        for item in soup.find_all('dd'):
            score_tags = item.find_all('i')
            rank = score_tags[0].get_text(strip=True)
            # Maoyan splits the score into an integer part (e.g. "9.") and a
            # fractional digit (e.g. "5"); recombine them into a float.
            integer_part = int(score_tags[1].get_text(strip=True).replace('.', ''))
            fraction_part = int(score_tags[2].get_text(strip=True)) * 0.1
            img_tag = item.find('img', class_='board-img')
            title = img_tag.get('alt', '')
            releasetime = item.find('p', class_='releasetime').get_text(strip=True)
            # Keep only the YYYY-MM-DD portion; fall back to the raw text when
            # the page gives no full date (e.g. year-only entries).
            date_match = re.search(r'\d{4}-\d{2}-\d{2}', releasetime)
            if date_match:
                releasetime = date_match.group()
            self.movie_info[title] = {
                'top': rank,
                'score': integer_part + fraction_part,
                'img_src': img_tag.get('data-src', ''),
                'actors': item.find('p', class_='star').get_text(strip=True).replace('主演：', ''),
                'releasetime': releasetime,
            }

    def get_page_url(self, url):
        """Append the ten paginated board URLs (?offset=0..90, step 10) to ``self.page_urls``."""
        for offset in range(0, 91, 10):
            self.page_urls.append(url + '?offset=' + str(offset))

    @staticmethod
    def writer(data, csv_file_name='movies.csv', encoding='utf-8'):
        """Write ``data`` rows to ``csv_file_name``.

        Creates the file with a header row on first use; afterwards appends
        rows without repeating the header. ``data`` maps each movie title to
        its attribute dict; the CSV columns are 'title' plus those attribute
        keys. A falsy ``data`` is a no-op (avoids StopIteration on an empty
        dict and pointless empty files).

        Bug fixes vs. the original: the create branch passed ``encoding``
        positionally as ``open``'s *mode* argument (a guaranteed TypeError),
        and the append branch hardcoded utf-8 instead of honoring ``encoding``.
        """
        if not data:
            return
        fieldnames = ['title'] + list(next(iter(data.values())).keys())
        file_exists = os.path.isfile(csv_file_name)
        mode = 'a' if file_exists else 'w'
        with open(csv_file_name, mode=mode, newline='', encoding=encoding) as csvfile:
            csv_writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            if not file_exists:
                csv_writer.writeheader()
            for title, attributes in data.items():
                row = {'title': title}
                row.update(attributes)
                csv_writer.writerow(row)


if __name__ == "__main__":
    target_url = 'https://www.maoyan.com/board/4'
    getTop = GetTop100()
    getTop.get_page_url(target_url)
    for i in getTop.page_urls:
        getTop.get_one_page_data(target_url)
        print(getTop.movie_info)
        time.sleep(120)
        getTop.writer(data=getTop.movie_info)
        getTop.movie_info = {}

        
