from pyquery import PyQuery as pq
import requests
import yaml
import os
import time
from Crawler.utils.tools import moive_is_existed
from Crawler.pipeline import save_sakura_data


# Config file path (absolute example): C:\Users\YONG\Desktop\MoiveLand\Crawler\tasks\sakura.yaml
# Load the crawler task configuration. Use a context manager so the file
# handle is closed promptly (the original `open()` was never closed).
# NOTE(review): yaml.full_load is fine for a trusted local config file;
# prefer yaml.safe_load if this path could ever hold untrusted content.
with open(r'.\crawler\tasks\sakura.yaml', encoding='utf-8') as _config_file:
    sakura_config = yaml.full_load(_config_file)

class SarukrCrawler:
    """Crawler for the "sakura" movie site.

    Walks paginated list pages, extracts per-movie detail URLs, parses each
    detail page and hands the assembled record to the pipeline
    (``save_sakura_data``).

    Expected keyword arguments (normally ``**sakura_config`` from the YAML
    task file): ``name``, ``site`` (URL of the first list page, containing
    ``page=1``), ``domian``, ``active`` and ``status`` — ``'init'`` for a
    full crawl over every page, ``'update'`` to refresh only page 1.
    """

    def __init__(self, *args, **kwargs):
        self.task_name = kwargs.get('name')
        self.orinal_list_url = kwargs.get('site')
        self.domian = kwargs.get('domian')
        self.active = kwargs.get('active')
        self.status = kwargs.get('status')
        if not self.status:
            # Fix: the original message told users to set status to True,
            # but get_list_task() actually requires 'init' or 'update'.
            raise Exception('status属性请设置为init或update')
        # Fix: original sent the bogus MIME type 'text/heml'.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/83.0.4103.97 Safari/537.36',
            'Content-Type': 'text/html;charset=utf-8',
        }

    def get_content(self, url):
        """GET ``url`` and return the body text, or None on failure.

        Sleeps one second first as a politeness delay. Network errors
        (timeouts, refused connections) are swallowed and reported as None
        so one bad request does not kill the whole crawl.
        """
        time.sleep(1)
        try:
            res = requests.get(url, headers=self.headers)
        except requests.RequestException:
            return None
        # Reset so requests re-detects the charset from the body instead of
        # trusting the (often wrong) Content-Type header.
        res.encoding = None
        if res.status_code == 200:
            return res.text
        return None

    def get_list_task(self):
        """Yield list-page task dicts of the form ``{"task_url": url}``.

        'init'   -> endless pagination: page 1, 2, 3, ... (``run()`` stops
                    when a page yields no parsable items).
        'update' -> only the first page.

        Raises Exception for any other status value.
        """
        if self.status == 'init':
            # e.g. http://host/getsortdata_all_z.php?action=mov&page=1&...
            template_list_url = self.orinal_list_url.replace('page=1', 'page=%d')
            yield {"task_url": self.orinal_list_url}
            page = 2
            while True:
                yield {"task_url": template_list_url % page}
                page += 1
        elif self.status == 'update':
            yield {"task_url": self.orinal_list_url}
        else:
            raise Exception('请设置正确的status')

    def __parser_list(self, content):
        """Parse one list page; return a list of detail-task dicts.

        Each dict has keys ``movie_name``, ``movie_url``, ``movie_cover``.
        """

        def parse_url(href):
            # Detail links are either absolute ('http://zimiyy.com/mov/39736/')
            # or site-relative ('/mov/39736/'). Fix: the original used a
            # fragile length check (len > 11) that left long relative paths
            # such as '/mov/123456/' without the domain prefix.
            if href.startswith('/'):
                return 'http://www.yhdm6.com' + href
            return href

        details_tasks = []
        doc = pq(content)
        # Every movie entry is an <li> whose class list contains 'mb'.
        item_list = doc('li.mb')
        for tag in item_list.items():
            href = tag('a').attr('href')
            if not href:
                # Skip malformed entries instead of crashing on None.
                continue
            details_tasks.append({
                'movie_name': tag('a').attr('title'),
                'movie_url': parse_url(href),
                'movie_cover': tag('div.img > img').attr('src'),
            })
        return details_tasks

    def __parser_detail(self, content, task):
        """Parse a detail page and enrich ``task`` with metadata in place.

        task example:
        {'movie_name': '普通的爱', 'movie_url': 'http://www.yhdm6.com/mov/56668/',
         'movie_cover': 'http://test99.1yltao.com:9011/upload/202006151592191476.jpg'}

        Returns the same ``task`` dict with 'perfomer', 'category', 'area',
        'publish_year' and 'desc' added.
        """

        def area_publishtime(parts):
            # The area/year field may contain both values, only one, or none.
            if not parts:
                return '', ''
            if len(parts) == 1:
                if parts[0].isdigit():
                    return '', parts[0]   # e.g. ['2011'] -> ('', '2011')
                return parts[0], ''       # e.g. ['中国'] -> ('中国', '')
            return parts                  # e.g. ['中国', '2011']

        doc = pq(content)
        # [3:] strips the field label in front of the artist list.
        artists = doc('div.info > dl > dd:nth-child(2)').text()[3:].split(' ')
        categorys = doc('div.info > dl > dd:nth-child(4) > a').text().split(' ')
        area, year = area_publishtime(
            doc('div.info > dl > dd:nth-child(3)').remove('b').text().split(' '))
        desc = doc('div.info > dl > dt.desd > div > div.des2').text()
        task['perfomer'] = artists
        task['category'] = categorys
        task['area'] = area
        task['publish_year'] = year
        task['desc'] = desc
        return task

    def run(self):
        """Main crawl loop: fetch list pages, then each new movie's detail.

        Stops when a list page cannot be fetched or yields no items
        (i.e. pagination ran past the last page).
        """
        for list_task in self.get_list_task():
            list_content = self.get_content(list_task['task_url'])
            if not list_content:
                break
            details_tasks = self.__parser_list(list_content)
            if not (isinstance(details_tasks, list) and details_tasks):
                break
            for detail_task in details_tasks:
                if moive_is_existed(detail_task['movie_name']):
                    print('%s 电影详情已经存在， 不再抓取' % detail_task['movie_name'])
                    continue
                print('准备抓取%s' % detail_task['movie_name'])
                detail_content = self.get_content(detail_task['movie_url'])
                if not detail_content:
                    print("%s-请求错误" % detail_task['movie_name'])
                    continue
                movie_info = self.__parser_detail(detail_content, detail_task)
                print(movie_info)
                if save_sakura_data(movie_info):
                    print(detail_task['movie_name'], '入库成功')
                else:
                    print(detail_task['movie_name'], '入库失败')




if __name__ == "__main__":
    # Build the crawler from the YAML task config and start the crawl.
    crawler = SarukrCrawler(**sakura_config)
    crawler.run()