# -*- coding: utf-8 -*-
import MySQLdb
from datetime import datetime
import time
import re
from urllib import parse
from scrapy import Request

import scrapy

from horsveision.items import CsrcItem


class SzseSpider(scrapy.Spider):
    """Scrape today's news from the Shenzhen Stock Exchange news listing.

    For every article published today whose title is not already stored in
    the ``spider_content`` table, the detail page is fetched and a
    ``CsrcItem`` is yielded.
    """

    name = 'szse'
    allowed_domains = ['www.szse.cn']
    start_urls = ['http://www.szse.cn/aboutus/trends/news/']
    # NOTE(review): DB credentials are hard-coded and the connection is opened
    # at class-definition (import) time; consider injecting them via Scrapy
    # settings / from_crawler instead.
    # db = MySQLdb.connect('192.168.100.101', 'root', 'root', 'haohsi', charset='utf8', use_unicode=True)
    db = MySQLdb.connect('localhost', 'root', '698a80e4212ba38f', 'toujiao', charset='utf8', use_unicode=True)

    def parse(self, response):
        """Parse the listing page and schedule detail-page requests.

        Only entries whose release date equals today's date are considered;
        titles already present in ``spider_content`` are skipped so the same
        article is not fetched twice.
        """
        news_list = response.css(".g-content-list ul li .title")
        now_date = datetime.now().strftime("%Y-%m-%d")
        now_time = time.mktime(time.strptime(now_date, "%Y-%m-%d"))

        for entry in news_list:
            release_date = entry.css(".time::text").extract()[0].replace(' ', '').strip()
            release_time = time.mktime(time.strptime(release_date, '%Y-%m-%d'))
            if int(release_time) != int(now_time):
                continue  # not published today
            # The listing embeds URL/title in an inline <script> whose body is
            # a ';'-separated list of assignments like "varcurTitle='...'".
            script_text = entry.css("script::text").extract()[0].replace(" ", '').strip()
            parts = re.split(';', script_text)
            raw_title = parts[3].strip().replace("varcurTitle=", '')
            new_title = re.findall(r"'(.*)'", raw_title)[0]
            with self.db.cursor() as cursor:
                # Parameterized query: the scraped title is untrusted input,
                # so it must never be interpolated into the SQL string (the
                # previous "... title='%s'" % new_title was a SQL-injection
                # vector and also broke on titles containing quotes).
                sql = "SELECT title FROM spider_content WHERE title=%s"
                cursor.execute(sql, (new_title,))
                if cursor.fetchone() is None:
                    content_url = parts[0].replace("varcurHref='", '').replace("'", '')
                    content_url = parse.urljoin(response.url, content_url)
                    yield Request(url=content_url, callback=self.pares_detail, dont_filter=True)

    # NOTE(review): "pares_detail" looks like a typo for "parse_detail"; the
    # name is kept unchanged because it is part of the class's interface and
    # is referenced as the callback above.
    def pares_detail(self, response):
        """Extract an article detail page into a ``CsrcItem``.

        Pulls title, publication date, and body paragraphs from the detail
        page and yields one populated item.
        """
        title = response.css(".des-header .title::text").extract()[0]
        send_time = response.css(".des-header .time span::text").extract()[0]
        paragraphs = response.css("#desContent p").extract()

        szse_item = CsrcItem()
        szse_item['title'] = title
        # Publication date stored as a unix timestamp (midnight, local time).
        szse_item['send_time'] = int(time.mktime(time.strptime(send_time, '%Y-%m-%d')))
        szse_item['source'] = "深圳证券交易所"
        szse_item['content'] = ''.join(paragraphs)
        szse_item['cover_img'] = ''

        yield szse_item
