# import requests
# import time
# import random
# import csv
# from lxml import html

# # 设置请求头
# headers = {
#     'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/117.0'
# }

# # CSV文件准备
# csv_file = open('douban_top250.csv', 'w', newline='', encoding='utf-8-sig')
# writer = csv.writer(csv_file)
# writer.writerow(['Rank', 'Title', 'Score', 'Reviews', 'Quote', 'Actors'])

# # 主爬虫函数
# def crawl_douban_top250():
#     base_url = "https://movie.douban.com/top250?start="
#     for i in range(10):
#         start = i * 25
#         url = base_url + str(start)
#         try:
#             print(f"Fetching page {i+1} 👉 {url}")
#             response = requests.get(url, headers=headers, timeout=10)
#             response.raise_for_status()
#             tree = html.fromstring(response.text)

#             items = tree.xpath('//ol[@class="grid_view"]/li')
#             for item in items:
#                 rank = item.xpath('.//em/text()')[0]
#                 title = item.xpath('.//span[@class="title"][1]/text()')[0]
#                 score = item.xpath('.//span[@class="rating_num"]/text()')[0]
#                 reviews = item.xpath('.//div[@class="star"]/span[last()]/text()')[0].replace('人评价', '')
#                 quote = item.xpath('.//span[@class="inq"]/text()')
#                 quote = quote[0] if quote else ''
#                 actors = item.xpath('.//p[1]/text()')
#                 actors = ''.join([a.strip() for a in actors if a.strip()])

#                 writer.writerow([rank, title, score, reviews, quote, actors])

#             sleep_time = random.uniform(2, 5)
#             print(f"Sleeping {sleep_time:.2f} seconds 😴\n")
#             time.sleep(sleep_time)

#         except Exception as e:
#             print(f"Oops, something broke on page {i+1} 😢: {e}")
#             continue

#     csv_file.close()
#     print("🎉 Done! Data saved to douban_top250.csv")

# # 启动爬虫
# crawl_douban_top250()



# import requests
# import time
# import random
# import csv
# from lxml import html

# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36'
# }

# with open('douban_top250.csv', 'w', newline='', encoding='utf-8-sig') as csv_file:
#     writer = csv.writer(csv_file)
#     writer.writerow(['Rank', 'Title', 'Score', 'Reviews', 'Quote', 'Actors'])

#     def crawl_douban_top250():
#         base_url = "https://movie.douban.com/top250?start="
#         for i in range(10):
#             start = i * 25
#             url = base_url + str(start)
#             try:
#                 print(f"Fetching page {i+1} 👉 {url}")
#                 response = requests.get(url, headers=headers, timeout=10)
#                 print(f"Status code: {response.status_code}")
#                 response.raise_for_status()
#                 tree = html.fromstring(response.text)

#                 items = tree.xpath('//ol[@class="grid_view"]/li')
#                 print(f"Found {len(items)} items on page {i+1}")
#                 for item in items:
#                     rank = item.xpath('.//em/text()')
#                     title = item.xpath('.//span[@class="title"][1]/text()')
#                     score = item.xpath('.//span[@class="rating_num"]/text()')
#                     reviews = item.xpath('.//div[@class="star"]/span[last()]/text()')
#                     quote = item.xpath('.//span[@class="inq"]/text()')
#                     actors = item.xpath('.//p[1]/text()')

#                     print(f"Rank: {rank}, Title: {title}, Score: {score}, Reviews: {reviews}, Quote: {quote}, Actors: {actors}")

#                     rank = rank[0] if rank else ''
#                     title = title[0] if title else ''
#                     score = score[0] if score else ''
#                     reviews = reviews[0].replace('人评价', '') if reviews else ''
#                     quote = quote[0] if quote else ''
#                     actors = ''.join([a.strip() for a in actors if a.strip()])

#                     writer.writerow([rank, title, score, reviews, quote, actors])
#                     csv_file.flush()

#                 sleep_time = random.uniform(2, 5)
#                 print(f"Sleeping {sleep_time:.2f} seconds 😴\n")
#                 time.sleep(sleep_time)

#             except requests.exceptions.HTTPError as http_err:
#                 print(f"HTTP error on page {i+1}: {http_err}")
#             except requests.exceptions.RequestException as req_err:
#                 print(f"Request error on page {i+1}: {req_err}")
#             except Exception as e:
#                 print(f"Unexpected error on page {i+1}: {e}")

#     crawl_douban_top250()



import requests
import time
import random
import csv
from lxml import html
from tabulate import tabulate  # For terminal table display

# HTTP request headers mimicking a desktop Chrome browser.
# NOTE(review): presumably needed so douban.com serves the normal HTML
# page rather than a bot-block response — confirm against live behavior.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.5'
}

# Chinese CSV column headers, matching the reference screenshot layout.
CSV_HEADERS = ['排名', '电影名', '得分', '评价人数', '描述']

BASE_URL = "https://movie.douban.com/top250?start="
PAGE_SIZE = 25   # movies per page on douban's Top-250 listing
PAGE_COUNT = 10  # 10 pages x 25 movies = Top 250


def parse_item(item):
    """Extract one movie row from a Top-250 list entry.

    ``item`` is any element exposing an ``.xpath(query) -> list`` method
    (an ``lxml`` node in production).  Returns
    ``[rank, title, score, reviews, description]`` where a missing
    rank/title/score/reviews becomes ``'N/A'`` and ``description`` merges
    the one-line quote with the cleaned actors/crew text.
    """
    rank = item.xpath('.//em/text()')
    title = item.xpath('.//span[@class="title"][1]/text()')
    score = item.xpath('.//span[@class="rating_num"]/text()')
    reviews = item.xpath('.//div[@class="star"]/span[last()]/text()')
    quote = item.xpath('.//span[@class="inq"]/text()')
    actors = item.xpath('.//p[1]/text()')

    # Guard against missing nodes so one malformed entry cannot abort a page.
    rank = rank[0] if rank else 'N/A'
    title = title[0] if title else 'N/A'
    score = score[0] if score else 'N/A'
    reviews = reviews[0].replace('人评价', '') if reviews else 'N/A'
    quote = quote[0] if quote else ''
    actors = ''.join(a.strip() for a in actors if a.strip())

    # Merge quote and crew line into the single "描述" (description) column.
    description = f"{quote} {actors}".strip()
    return [rank, title, score, reviews, description]


def crawl_douban_top250(writer, csv_file):
    """Scrape all ten Top-250 pages, writing one CSV row per movie.

    Parameters:
        writer:   a ``csv.writer`` rows are appended to.
        csv_file: the underlying file object, flushed once per page so
                  partial data survives a crash.

    Returns the list of scraped rows for terminal display.

    A randomized politeness delay is taken after EVERY page — including
    error and empty-result pages — so failures never cause the loop to
    hammer the server with back-to-back requests (the original slept
    only on the success path).
    """
    table_data = []
    for page in range(PAGE_COUNT):
        url = BASE_URL + str(page * PAGE_SIZE)
        try:
            print(f"Fetching page {page+1} 👉 {url}")
            response = requests.get(url, headers=headers, timeout=10)
            print(f"Status code: {response.status_code}")
            response.raise_for_status()
            tree = html.fromstring(response.text)

            items = tree.xpath('//ol[@class="grid_view"]/li')
            print(f"Found {len(items)} items on page {page+1}")
            if not items:
                print("No items found. Check the XPath for '//ol[@class=\"grid_view\"]/li'.")
                continue  # finally-clause below still applies the delay

            for item in items:
                row = parse_item(item)
                writer.writerow(row)
                table_data.append(row)
            # Flush once per page (the original flushed per row) — keeps
            # partial data on disk without thousands of tiny syscalls.
            csv_file.flush()

        except requests.exceptions.HTTPError as http_err:
            print(f"HTTP error on page {page+1}: {http_err}")
        except requests.exceptions.RequestException as req_err:
            print(f"Request error on page {page+1}: {req_err}")
        except Exception as e:
            print(f"Unexpected error on page {page+1}: {e}")
        finally:
            # Polite crawl delay on every code path.
            sleep_time = random.uniform(2, 5)
            print(f"Sleeping {sleep_time:.2f} seconds \n")
            time.sleep(sleep_time)
    return table_data


def main():
    """Run the scrape, then echo the results as a terminal table."""
    # utf-8-sig BOM keeps the Chinese headers readable when opened in Excel.
    with open('douban_top250.csv', 'w', newline='', encoding='utf-8-sig') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(CSV_HEADERS)

        table_data = crawl_douban_top250(writer, csv_file)

        if table_data:
            print("\n Scraped Data in Table Format:")
            print(tabulate(table_data, headers=CSV_HEADERS, tablefmt="grid", stralign="left"))
        else:
            print("\nNo data scraped. Please check the XPath selectors or network connection.")

    print(" Done! Data saved to douban_top250.csv")


# Guarded entry point: importing this module no longer triggers the crawl.
if __name__ == "__main__":
    main()