#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/8/2 14:55
# @Author  : Fairy Huang
# @File    : myspider.py
# @Project: LabCrawler
import settings
import pymysql
import redis
import json
import datetime
from urllib.parse import urlparse
from scrapy_redis.spiders import RedisSpider

from utils import common, errors
from settings import REDIS_CONN


class MySpider(RedisSpider):
    """Redis-driven spider that tracks crawl-task progress in Redis and MySQL.

    Task metadata (keyword, request/crawled counters) lives in Redis under
    ``task_id:<id>:*`` keys; site URL templates and task rows live in MySQL
    (``site_urls`` / ``crawler_task`` tables — see ``get_url_params``).
    """
    name = 'myspider'

    def __init__(self, **kwargs):
        super(MySpider, self).__init__(**kwargs)
        self.get_log()
        # self.get_account()
        # Dedicated Redis client for task bookkeeping (separate from the
        # scrapy-redis scheduler's own connection).
        self.redis_cli = redis.StrictRedis(**REDIS_CONN)

    def get_log(self):
        # NOTE(review): this shadows scrapy.Spider.log(); kept as-is because
        # every method below relies on ``self.log.logger``.
        self.log = common.log_helper(self.name)

    def get_domain(self, url):
        """Return the ``scheme://netloc`` prefix of *url*."""
        parsed_uri = urlparse(url)
        domain = '{uri.scheme}://{uri.netloc}'.format(uri=parsed_uri)
        return domain

    def get_taskid(self, resp_text):
        """Extract ``task_id`` from the JSON payload embedded in *resp_text*.

        Returns 0 when the payload carries no ``task_id`` field.
        """
        json_data = json.loads(common.extract_json(resp_text))
        task_id = json_data.get("task_id", 0)
        self.log.logger.debug('Get Task ID: {0}'.format(task_id))
        return task_id

    def get_keyword(self, task_id):
        """Return the search keyword stored in Redis for *task_id*."""
        return self.get_redis_str("task_id:{0}:keyword".format(task_id))

    def get_url_params(self, task_id):
        """Look up the site URL template for *task_id* and build its params.

        Returns:
            (url, params): the request URL and its parameter dict, with the
            task keyword and page number 1 merged in.

        Raises:
            errors.DBNotFoundError: when no ``site_urls`` row matches.
        """
        db = pymysql.Connect(**settings.DB_CONN_DICT)
        try:
            # Get a cursor and run the lookup.
            cur = db.cursor()
            try:
                # Parameterized query: task_id originates from scraped
                # content (see get_taskid), so never interpolate it into
                # the SQL string.
                sql = ("SELECT su.url, su.method, su.params, su.key_param, "
                       "su.page_param, su.page_size "
                       "FROM site_urls AS su, crawler_task as ct "
                       "WHERE su.site_id = ct.site_id AND ct.id = %s")
                cur.execute(sql, (task_id,))
                result = cur.fetchall()
            finally:
                cur.close()
        finally:
            # Always release the connection, even when the lookup fails
            # (the original leaked it on the not-found path).
            db.close()

        if not result:
            self.log.logger.error("No site url params found for task_id: {0}!".format(task_id))
            raise errors.DBNotFoundError

        url, method, params, key_param, page_param, page_size = result[0]
        params = json.loads(params)
        # Reuse get_keyword() instead of duplicating the Redis key format.
        keyword = self.get_keyword(task_id)
        params.update({key_param: keyword, page_param: 1})

        return url, params

    def get_redis_str(self, key):
        """GET *key* from Redis and decode the bytes value to ``str``."""
        key_bytes = self.redis_cli.get(key)
        key_str = common.bytes_to_str(key_bytes)
        return key_str

    def is_task_done(self, task_id):
        """Return True once the crawled counter has caught up with requests.

        Assumes both counters exist in Redis for this task; a missing key
        would make ``int(None)`` raise — only ask about known tasks.
        """
        request_cnt = self.get_redis_str("task_id:{0}:request_cnt".format(task_id))
        crawled_cnt = self.get_redis_str("task_id:{0}:crawled_cnt".format(task_id))
        return int(crawled_cnt) >= int(request_cnt)

    def set_task_done(self, task_id):
        """Force-mark *task_id* finished in Redis and stamp its end time in MySQL."""
        # Snap the crawled counter up to the request counter so
        # is_task_done() reports True from now on.
        request_cnt = self.redis_cli.get("task_id:{0}:request_cnt".format(task_id))
        self.redis_cli.set("task_id:{0}:crawled_cnt".format(task_id), request_cnt)
        db = pymysql.Connect(**settings.DB_CONN_DICT)
        try:
            # Get a cursor and run the update.
            cur = db.cursor()
            try:
                # Parameterized UPDATE: avoids SQL injection and lets the
                # driver serialize the datetime value correctly (the original
                # pasted str(datetime) straight into the statement).
                sql = "UPDATE crawler_task SET crawl_end_time=%s WHERE id=%s"
                cur.execute(sql, (datetime.datetime.now(), task_id))
                db.commit()
            finally:
                cur.close()
        finally:
            db.close()