# -*- coding: utf-8 -*-
import scrapy
import MySQLdb
import datetime
import time
from scrapy.http import Request
from urllib import parse

from horsveision.items import CsrcItem


class HuaxiaSpider(scrapy.Spider):
    """Scrape today's securities news from the Huaxia (China Times) finance list page.

    ``parse`` walks the listing, keeps only articles dated today that are not
    already in the ``spider_content`` table, and dispatches ``parse_detail``
    requests; ``parse_detail`` builds and yields a :class:`CsrcItem`.
    """

    name = 'huaxia'
    allowed_domains = ['www.chinatimes.net.cn']
    start_urls = ['http://www.chinatimes.net.cn/finance/zhengquan']
    # NOTE(review): credentials are hard-coded in source control — move them to
    # Scrapy settings or environment variables.
    db = MySQLdb.connect('localhost', 'root', '698a80e4212ba38f', 'toujiao', charset='utf8', use_unicode=True)

    def parse(self, response):
        """Yield detail-page requests for today's not-yet-stored articles.

        :param response: listing-page response for ``start_urls[0]``.
        """
        now_date = datetime.datetime.now().strftime('%Y-%m-%d')
        now_time = time.mktime(time.strptime(now_date, '%Y-%m-%d'))
        content_list = response.css(".list_news>div")

        # Slice instead of range(10): the original raised IndexError whenever
        # the listing page had fewer than 10 entries.
        for entry in content_list[:10]:
            time_texts = entry.css(".info .time::text").extract()
            if len(time_texts) == 1:
                raw_stamp = time_texts[0]
            else:
                # Some entries wrap the timestamp in a <font> tag.
                raw_stamp = entry.css(".info .time font::text").extract()[0]

            # Timestamps look like '今天 HH:MM', a bare value, or
            # 'YYYY-MM-DD ...'. Normalize everything to 'YYYY-MM-DD'.
            parts = raw_stamp.split(' ', 1)
            if len(parts) == 2 and parts[0] != '今天' and parts[1] != '':
                # Bug fix: the original left ``send_new_date`` as a list here,
                # so time.strptime() below raised TypeError. Use the date part.
                send_new_date = parts[0]
            else:
                # '今天 HH:MM', trailing-space, or single-token stamps all mean today.
                send_new_date = now_date

            send_new_time = time.mktime(time.strptime(send_new_date, '%Y-%m-%d'))
            if int(send_new_time) != int(now_time):
                continue  # only today's articles are of interest

            new_title = entry.css("a img::attr(title)").extract()[0]
            with self.db.cursor() as cursor:
                # Parameterized query — the original interpolated the scraped
                # title straight into SQL (injection risk, and any title
                # containing a quote broke the statement).
                cursor.execute("SELECT title FROM spider_content WHERE title=%s", (new_title,))
                if cursor.fetchone() is None:
                    cover_img = entry.css("a img::attr(src)").extract()[0]
                    detail_url = entry.css("a::attr(href)").extract()[0]
                    yield Request(url=parse.urljoin(response.url, detail_url),
                                  callback=self.parse_detail, dont_filter=True,
                                  meta={"cover_img": parse.urljoin(response.url, cover_img)})

    def parse_detail(self, response):
        """Extract title, source, timestamp, body and cover image into a CsrcItem.

        :param response: article detail-page response; ``meta['cover_img']``
            carries the absolute cover-image URL set by :meth:`parse`.
        """
        new_title = response.css(".content .title h1::text").extract()[0]
        new_source = response.css("#source_baidu::text").extract()[0].replace("来源：", "")
        send_date = response.css("#pubtime_baidu::text").extract()[0].replace("发布时间：", "")
        # Keep the raw <p> markup; downstream consumers receive joined HTML.
        content_all = ''.join(response.css(".content .infoMain p").extract())

        huaxiaItem = CsrcItem()
        huaxiaItem['title'] = new_title
        # Publish time is stored as a Unix timestamp (seconds, local time).
        huaxiaItem['send_time'] = int(time.mktime(time.strptime(send_date, '%Y-%m-%d %H:%M:%S')))
        huaxiaItem['source'] = new_source
        huaxiaItem['content'] = content_all
        huaxiaItem['cover_img'] = response.meta.get("cover_img", "")

        yield huaxiaItem
