import json
import logging
import re
from os import makedirs
from os.path import exists

from selenium import webdriver
from selenium.common import TimeoutException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


RESULTS_DIR = 'results'
exists(RESULTS_DIR) or makedirs(RESULTS_DIR)


logging.basicConfig(level=logging.INFO, format='%(asctime)s '
                                                     '- %(levelname)s '
                                                     '- %(message)s')

class Scrapyssr3():

    def __init__(self):
        self.base_url = 'https://admin:admin@ssr3.scrape.center/page/{page}'
        self.driver_path = 'D:\chromedriver-win64\chromedriver.exe'
        options = Options()
        options.add_argument('--headless')  # 无头浏览
        self.browser = webdriver.Chrome(service=Service(self.driver_path),
                                        options=options)
        self.find = self.browser.find_element
        self.finds = self.browser.find_elements
        self.max_page = 10


    def scrape_page(self, url, locator):
        logging.info('scraping pege %s...' % url)
        try:
            self.browser.get(url)
        except TimeoutException:
            logging.error('error occurred while scraping %s', url, exc_info=True)

    def scrapy_index(self, page):
        url = self.base_url.format(page=page)
        self.scrape_page(url, locator=(By.CSS_SELECTOR, '#index .item'))

    def parse_index(self):
        elements = self.finds(By.CSS_SELECTOR, '#index .item .name')
        for element in elements:
            href = element.get_attribute('href')
            yield f'{href}'

    def scrape_detail(self, url):
        self.scrape_page(url,
                    locator=(By.TAG_NAME, 'h2'))
    # 解析详情页
    def parse_detail(self):
        url = self.browser.current_url
        logging.info('scraping detail...')
        name = self.find(By.CSS_SELECTOR, 'h2').text
        categories = [element.text for element in self.finds(By.CSS_SELECTOR, '.categories button span')]
        score = self.find(By.CSS_SELECTOR, '.score').text
        drama = self.find(By.CSS_SELECTOR, '.drama p').text.strip()
        cover = self.find(By.CSS_SELECTOR, '.cover').get_attribute('src')
        if ':' in name:
            name = re.sub(':', '：', name)
        logging.info(f'scraping data: \n'
                     f'name: {name}\n'
                     f'categories: {categories}\n'
                     f'score: {score}\n'
                     f'drama: {drama}\n'
                     f'cover: {cover}\n'
                     )
        return {
            'name': name,
            'categories': categories,
            'score': score,
            'drama': drama,
            'cover': cover
        }



    def save_data(self, data):
        if all(data.values()):  # 确保所有数据项搜非空
            name = data.get('name')
            data_path = f'{RESULTS_DIR}/{name}.json'
            with open(data_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=2)
        else:
            logging.error('Incomplete data, not saving %s', data)


    def main(self):
        try:
            for page in range(1, self.max_page + 1):
                self.scrapy_index(page)
                detail_urls = self.parse_index()
                for detail_url in list(detail_urls):
                    logging.info('get detail url %s', detail_url)
                    self.scrape_detail(detail_url)
                    detail_data =self.parse_detail()
                    logging.info('detail data %s', detail_data)
                    self.save_data(detail_data)
                    logging.info('save detail data: %s', detail_data)
        finally:
            self.browser.close()




if __name__ == '__main__':
    ssr2 = Scrapyssr3()
    ssr2.main()