import requests
from bs4 import BeautifulSoup
import pandas as pd
from fake_useragent import UserAgent
import matplotlib.pyplot as plt
import seaborn as sns

def get_cookies(url):
    """Fetch the cookies set by *url* and return them as a plain dict.

    Returns an empty dict when the request fails for any reason (network
    error, timeout, or HTTP error status).
    """
    headers = {
        'User-Agent': UserAgent().edge
    }
    try:
        # timeout keeps a stalled server from hanging the whole run
        # (the original call had no timeout and could block forever).
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()  # raise on HTTP error status codes
        return response.cookies.get_dict()
    except requests.RequestException as e:
        print(f"Failed to get cookies: {e}")
        return {}

class DoubanMovieScraper:
    """Scrapes short comments and star ratings for one Douban movie.

    Fetches up to ``pages`` pages of 20 comments each, accumulates the
    results in memory, and can persist/reload them as CSV.
    """

    def __init__(self, movie_id, pages, cookies=None):
        self.movie_id = movie_id  # numeric Douban subject id
        self.pages = pages        # number of 20-comment pages to fetch
        self.cookies = cookies if cookies else {}
        self.headers = {
            'User-Agent': UserAgent().edge
        }
        self.all_comments = []    # comment text (str) or None, one per item
        self.all_ratings = []     # first char of the rating title, or None

    def fetch_data(self):
        """Download each comment page and hand its parsed soup to parse_data."""
        base_url = f'https://movie.douban.com/subject/{self.movie_id}/comments?'
        for page in range(self.pages):
            start = page * 20  # Douban paginates in steps of 20
            url = f'{base_url}start={start}&limit=20&status=P&sort=new_score'
            try:
                # timeout prevents a stalled server from hanging the scrape
                response = requests.get(url, headers=self.headers,
                                        cookies=self.cookies, timeout=10)
                response.raise_for_status()  # raise on HTTP error status
                soup = BeautifulSoup(response.text, 'html.parser')
                self.parse_data(soup)
            except requests.RequestException as e:
                print(f"Request failed: {e}")

    def parse_data(self, soup):
        """Extract (comment, rating) pairs from one page and store them.

        Fixes vs. the original implementation:
        * the per-item extraction now runs INSIDE the for loop — the
          original indentation only processed the last item and raised
          NameError on an empty page;
        * the rating span is located by the presence of a ``title``
          attribute — the original passed the literal string '(.*?)',
          which never matches any real title;
        * the error message and traceback now print only when an
          exception actually occurred — the original referenced ``e``
          after the except block, itself a NameError on the success path.
        """
        try:
            comments_and_ratings = []
            for item in soup.find_all('div', class_='comment-item'):
                comment_tag = item.find('span', class_='short')
                comment = comment_tag.text.strip() if comment_tag else None

                # title=True matches any <span> carrying a title attribute.
                # NOTE(review): assumes the first such span is the rating —
                # confirm against Douban's current markup.
                rating_tag = item.find('span', title=True)
                rating = rating_tag.get('title', '')[:1] if rating_tag and rating_tag.get('title') else None

                comments_and_ratings.append((comment, rating))

            # Add the whole page's results in one go.
            self.all_comments.extend(c for c, _ in comments_and_ratings)
            self.all_ratings.extend(r for _, r in comments_and_ratings)
        except Exception as e:
            import traceback
            print(f"Error processing comments: {e}")
            traceback.print_exc()

    def save_data(self, filename='data.csv'):
        """Write the collected comments and ratings to a CSV file."""
        df = pd.DataFrame({'Comment': self.all_comments, 'Rating': self.all_ratings})
        df.to_csv(filename, index=False)

    def clean_data(self, filename='data.csv'):
        """Reload the CSV and drop rows missing either field.

        ``filename`` defaults to the path save_data writes, so the two
        methods stay consistent (the original hard-coded 'data.csv').
        """
        df = pd.read_csv(filename)
        df.dropna(inplace=True)
        return df

    def analyze_data(self, df):
        """Print rating statistics and show a count plot of the ratings."""
        print("Basic statistics:")
        print(df['Rating'].describe())

        plt.figure(figsize=(10, 6))
        sns.countplot(x='Rating', data=df, order=[1, 2, 3, 4, 5])
        plt.title('Distribution of Ratings')
        plt.xlabel('Rating')
        plt.ylabel('Count')
        plt.show()

    def run(self):
        """Full pipeline: fetch, save, clean. Plot analysis left disabled."""
        self.fetch_data()
        self.save_data()
        df = self.clean_data()
        print("Data analysis ok")
        # self.analyze_data(df)

# Usage example
if __name__ == "__main__":
    # Subject page for the target movie ("Boonie Bears" New Year film).
    url = 'https://movie.douban.com/subject/25861610/'
    # cookies = get_cookies(url)
    bot = DoubanMovieScraper(movie_id=25861610, pages=15)
    bot.run()
