# -*- coding: utf-8 -*-
import datetime
import json
import logging
import time
from urllib.parse import urlencode

import pymysql
import scrapy

import youtube_spider.common as common
import youtube_spider.settings as settings
from youtube_spider.items import ChannelsItem, VideosItem


class VideosSpider(scrapy.Spider):
    """Crawl recent YouTube videos and their channels via the Data API v3.

    Search keywords are read from the ``yt_query`` MySQL table (rows with
    ``status = 1``). For every keyword/region pair the spider pages through
    ``search.list`` results published within the last ``SEARCH_WINDOW_DAYS``
    days, then issues batched ``videos.list`` and ``channels.list`` follow-up
    requests whose items are handled by the two registered pipelines.
    """

    name = 'videos'
    allowed_domains = ['googleapis.com']
    logger = None
    custom_settings = {
        'ITEM_PIPELINES': {
            # Distinct order values: with both at 300 the relative pipeline
            # order was undefined. Videos now run before Channels.
            'youtube_spider.pipelines.VideosPipeline': 300,
            'youtube_spider.pipelines.ChannelsPipeline': 310,
        }
    }

    # How far back (in days) the publishedAfter search window reaches.
    SEARCH_WINDOW_DAYS = 60

    def __init__(self, *args, **kwargs):
        # Forward args/kwargs so Scrapy's own Spider initialisation runs
        # (the original skipped super().__init__() entirely).
        super().__init__(*args, **kwargs)
        self.logger = logging.getLogger(__name__)
        self.mysql_conn()

    def mysql_conn(self):
        """Open the MySQL connection and DictCursor used to read keywords."""
        db_params = dict(
            host=settings.DB_HOST,
            db=settings.DB_NAME,
            password=settings.DB_PASSWORD,
            user=settings.DB_USER,
            port=settings.DB_PORT,
            # NOTE(review): "CHARESET" is misspelled in settings.py; kept
            # as-is so this module matches the existing settings name.
            charset=settings.DB_CHARESET,
            cursorclass=pymysql.cursors.DictCursor)
        self.db = pymysql.connect(**db_params)
        self.cursor = self.db.cursor()

    def closed(self, reason):
        """Scrapy close hook: release the DB cursor and connection.

        The original spider leaked both; Scrapy calls this automatically
        when the spider finishes.
        """
        try:
            self.cursor.close()
        finally:
            self.db.close()

    def start_requests(self):
        """Seed one search request per (keyword, region) pair.

        The cutoff is ``SEARCH_WINDOW_DAYS`` days ago, converted to UTC
        because the API's ``publishedAfter`` parameter expects UTC.
        """
        cutoff = datetime.datetime.fromtimestamp(
            time.time() - self.SEARCH_WINDOW_DAYS * 24 * 3600)
        published_after_utc = common.local2utc(cutoff)

        # Target regions.
        region_codes = ['US', 'DE', 'GB', 'AU']

        # Active search keywords from the database.
        self.cursor.execute("select * from yt_query where status = 1")
        start_querys = [str(row['q']) for row in self.cursor.fetchall()]

        for q in start_querys:
            for code in region_codes:
                yield self._build_request_search_list(q, code, published_after_utc, '')

    def _build_request_search_list(self, q, region_code, published_after_utc, page_token):
        """Build a ``search.list`` request (videos only, newest first).

        ``publishedAfter`` must be UTC; values later than "now" are ignored
        by the API. ``page_token`` is '' for the first page.
        """
        # urlencode escapes the query string: the old raw concatenation
        # produced invalid URLs for keywords containing spaces, '&', etc.
        params = urlencode({
            'key': settings.API_KEY,
            'part': 'snippet',
            'maxResults': 50,
            'order': 'date',
            'type': 'video',
            'regionCode': region_code,
            'publishedAfter': published_after_utc,
            'q': q,
            'pageToken': page_token,
        })
        request = scrapy.Request(
            'https://www.googleapis.com/youtube/v3/search?' + params,
            callback=self.parse_search_list)
        request.meta['q'] = q
        request.meta['published_after_utc'] = published_after_utc
        self.logger.warning(request.url)
        return request

    def _build_request_videos(self, id, q, region_code):
        """Build a batched ``videos.list`` detail request.

        ``id`` is a comma-separated list of video ids (max 50 per call).
        """
        params = urlencode({
            'key': settings.API_KEY,
            'part': 'snippet,statistics',
            'id': id,
        })
        request = scrapy.Request(
            'https://www.googleapis.com/youtube/v3/videos?' + params,
            callback=self.parse_videos)
        request.meta['q'] = q
        request.meta['region_code'] = region_code
        return request

    def _build_request_channels(self, id):
        """Build a batched ``channels.list`` request.

        ``id`` is a comma-separated list of channel ids. Uses the
        configured API key (the original hard-coded a leaked key here).
        """
        params = urlencode({
            'key': settings.API_KEY,
            'part': 'snippet,statistics',
            'id': id,
        })
        return scrapy.Request(
            'https://www.googleapis.com/youtube/v3/channels?' + params,
            callback=self.parse_channels)

    def parse_search_list(self, response):
        """Yield the next search page plus batched video/channel requests."""
        result = json.loads(response.text)
        if not result['items']:
            return

        q = response.meta['q']
        region_code = result['regionCode']
        published_after_utc = response.meta['published_after_utc']

        # Next page of search results, if any.
        if 'nextPageToken' in result:
            yield self._build_request_search_list(
                q, region_code, published_after_utc, result['nextPageToken'])

        # Collect ids for the batched detail requests.
        video_ids = []
        channel_ids = []
        for item in result['items']:
            if 'id' in item and 'videoId' in item['id']:
                video_ids.append(item['id']['videoId'])
            if 'snippet' in item and 'channelId' in item['snippet']:
                channel_ids.append(item['snippet']['channelId'])
        # De-duplicate channels: many videos share one channel.
        channel_ids = list(set(channel_ids))

        if video_ids:
            yield self._build_request_videos(','.join(video_ids), q, region_code)
        if channel_ids:
            yield self._build_request_channels(','.join(channel_ids))

    def parse_videos(self, response):
        """Yield one VideosItem per video in a ``videos.list`` response."""
        result = json.loads(response.text)
        for item in result['items']:
            video_item = VideosItem()
            video_item['q'] = response.meta['q']
            video_item['video_id'] = item['id']
            video_item['title'] = item['snippet']['title']
            video_item['description'] = item['snippet']['description']
            video_item['thumbnails'] = item['snippet']['thumbnails']['default']['url']
            video_item['view_count'] = item['statistics']['viewCount']
            video_item['region_code'] = response.meta['region_code']
            video_item['published_at'] = item['snippet']['publishedAt']
            video_item['channel_id'] = item['snippet']['channelId']
            # Local wall-clock collection timestamp.
            video_item['collected_at'] = time.strftime('%Y-%m-%d %H:%M:%S')
            yield video_item

    def parse_channels(self, response):
        """Yield one ChannelsItem per channel in a ``channels.list`` response."""
        result = json.loads(response.text)
        for item in result['items']:
            channel_item = ChannelsItem()
            channel_item['channel_id'] = item['id']
            channel_item['title'] = item['snippet']['title']
            channel_item['thumbnails'] = item['snippet']['thumbnails']['default']['url']
            channel_item['view_count'] = item['statistics']['viewCount']
            channel_item['subscriber_count'] = item['statistics']['subscriberCount']
            channel_item['video_count'] = item['statistics']['videoCount']
            channel_item['published_at'] = item['snippet']['publishedAt']
            # Local wall-clock collection timestamp.
            channel_item['collected_at'] = time.strftime('%Y-%m-%d %H:%M:%S')
            yield channel_item

