# -*- coding: utf-8 -*-
import MySQLdb

import scrapy
import datetime
import time
from scrapy.http import Request
from urllib import parse
from horsveision.items import CsrcItem


class SseSpider(scrapy.Spider):
    """Crawl the SSE (Shanghai Stock Exchange) media-center hot list and
    yield a ``CsrcItem`` for each article published today that is not yet
    present in the ``spider_content`` table.
    """
    name = 'sse'
    allowed_domains = ['www.sse.com.cn']
    start_urls = ['http://www.sse.com.cn/aboutus/mediacenter/hotandd/']
    # NOTE(review): credentials are hard-coded in source — move them to
    # Scrapy settings or environment variables.
    db = MySQLdb.connect('localhost', 'root', '698a80e4212ba38f', 'toujiao', charset='utf8', use_unicode=True)
    # db = MySQLdb.connect('192.168.100.101', 'root', 'root', 'haohsi', charset='utf8', use_unicode=True)

    def parse(self, response):
        """List page callback: for every entry dated today whose title is
        not already stored in the DB, schedule a detail-page request.

        :param response: the list-page response for ``start_urls[0]``.
        :yields: ``scrapy.Request`` objects routed to :meth:`parse_detail`.
        """
        list_contents = response.css("#sse_list_1 dl dd")
        # Midnight-of-today as a Unix timestamp, for date-only comparison.
        now_date = datetime.datetime.now().strftime('%Y-%m-%d')
        now_time = time.mktime(time.strptime(now_date, '%Y-%m-%d'))
        for list_content in list_contents:
            release_date = list_content.css("span::text").extract_first('')
            if not release_date:
                continue  # malformed row: original raised IndexError here
            release_time = time.mktime(time.strptime(release_date, '%Y-%m-%d'))
            if int(release_time) != int(now_time):
                continue  # not published today
            title = list_content.css('a::attr(title)').extract_first('')
            with self.db.cursor() as cursor:
                # Parameterized query: the original interpolated `title`
                # with `%` formatting, which broke on quotes in the title
                # and was SQL-injectable.
                cursor.execute(
                    "SELECT title FROM spider_content WHERE title=%s",
                    (title,))
                if cursor.fetchone() is None:
                    parse_url = list_content.css('a::attr(href)').extract_first()
                    if parse_url:
                        yield Request(url=parse.urljoin(response.url, parse_url),
                                      callback=self.parse_detail,
                                      dont_filter=True)

    def parse_detail(self, response):
        """Detail page callback: build and yield a ``CsrcItem``.

        PDF links carry no parseable article body, so the item stores the
        URL itself as content with the current time as ``send_time``.

        :param response: the article (or PDF) response.
        :yields: one populated ``CsrcItem``.
        """
        url = response.url
        if url.endswith('.pdf'):  # original had redundant parens, not a tuple
            content = url
            title = ''
            send_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        else:
            title = response.css(".article-infor h2::text").extract_first('')
            content = response.css(".article-infor .allZoom p").extract()
            # NOTE(review): assumes the page renders '%Y-%m-%d %H:%M:%S';
            # strptime below raises ValueError otherwise — confirm format.
            send_time = response.css(".article-infor .article_opt i::text").extract_first('')

        source = "上海证券交易所"

        sse_item = CsrcItem()
        sse_item['title'] = title
        sse_item['send_time'] = int(time.mktime(time.strptime(send_time, '%Y-%m-%d %H:%M:%S')))
        sse_item['source'] = source
        sse_item['content'] = content
        sse_item['cover_img'] = ''

        yield sse_item
