# -*- coding: utf-8 -*-
import scrapy
import re
from pymysql import *
import logging
class NewsDetailSpider(scrapy.Spider):
    """Crawl detail pages listed in MySQL table ``bx_news_url`` and yield items.

    Rows with ``status = 0`` are scheduled; ``from_site`` (1/2 = article,
    3 = Q&A) selects the parse branch.  Pages that 404 or fail to parse are
    marked ``status = 2`` in the table.
    """

    # Let 404 responses reach parse() instead of being filtered by Scrapy.
    handle_httpstatus_list = [404]
    name = 'news_detail'
    allowed_domains = ['cignacmb.com']
    # start_urls = ['http://cignacmb.com/']

    # NOTE(review): credentials are hard-coded in source; move them to Scrapy
    # settings or environment variables.  Connection is opened at class-body
    # execution time (i.e. on import), which is fragile — confirm intended.
    conn = connect(  # host='localhost',
        host='218.28.2.162',
        port=3306,
        database='bx_caiji',
        user='caiji_user',
        # password='xyb.1206',
        password='bxgj2019',
        charset="utf8",
    )
    cs = conn.cursor()

    # red = redis.StrictRedis(host='192.168.3.191',
    #                              port=6379,
    #                              db=8)

    def _mark_failed(self, uid, print_msg, log_msg=None):
        """Mark the row as failed (status=2) and report it.

        :param uid: primary key of the bx_news_url row.
        :param print_msg: message echoed to stdout.
        :param log_msg: optional message for the log (404 case only).
        """
        update_sql = 'UPDATE bx_news_url SET status=2 WHERE uid=%s'
        # MySQL may have dropped the idle connection between responses.
        self.conn.ping(reconnect=True)
        self.cs.execute(update_sql, (uid,))
        self.conn.commit()
        if log_msg:
            logging.info(log_msg)
        print(print_msg)

    def start_requests(self):
        """Read pending URLs from MySQL and schedule a request per row."""
        sql = "SELECT url,from_site,uid from bx_news_url where status = 0 AND from_site > 0"
        self.conn.ping(reconnect=True)
        self.cs.execute(sql)
        rows = self.cs.fetchall()
        # rows columns: (url, from_site, uid)
        for detail_url, url_type, uid in rows:
            yield scrapy.Request(
                url=detail_url,
                callback=self.parse,
                meta={"type": url_type, "uid": uid},
                # errback=self.parse_err,
            )

    def parse(self, response):
        """Dispatch on from_site type: 1/2 = article pages, 3 = Q&A pages."""
        url_type = response.meta["type"]
        uid = response.meta["uid"]
        if url_type in (1, 2):  # 保险知识 保险案例 (articles)
            if response.status == 404:
                self._mark_failed(uid, "页面404！", "页面404！！！！！！！！")
                return
            title = response.xpath("//h1/text()").extract_first()
            if not title:
                self._mark_failed(uid, "页面错误！")
                return
            writer = '无'
            from_source = response.url
            # addtime = int(time.time())
            # The view counter is served as an image; its src is fetched
            # separately in parse1 to extract the number.
            see_num = response.xpath("//div[@class='time-source']/span[3]//@src").extract_first()
            abstract = response.xpath("//div[@class='abstract']/p//text()").extract_first()
            content = response.xpath("//div[@class='zx-content']").extract_first()
            keywords = response.xpath("//meta[@name='keywords']/@content").extract_first()
            description = response.xpath("//meta[@name='description']/@content").extract_first()
            item = dict(  # 放入字典
                title=title,
                writer=writer,
                url=from_source,
                abstract=abstract,
                content=content,
                keywords=keywords,
                description=description,
                uid=uid,
                type=url_type,
            )
            if see_num:
                yield scrapy.Request(
                    url="http://www.cignacmb.com/" + see_num,
                    callback=self.parse1,
                    meta={"type": url_type, "item": item},
                    # errback=self.parse_err,
                )
            else:
                # Fix: original raised TypeError (None concatenation) when the
                # counter src was missing and the item was lost; emit it anyway.
                yield item
        elif url_type == 3:  # 问答 (Q&A)
            if response.status == 404:
                self._mark_failed(uid, "页面404！", "页面404！！！！！！！！")
                return
            ask_title = response.xpath("//div[@class='wjcon-1']/span/text()").extract_first()
            if not ask_title:
                self._mark_failed(uid, "页面错误！")
                return
            ask_content = response.xpath("//div[@class='wjcon-1']/p/text()").extract_first()
            ans_list = [
                ans.xpath(".//text()").extract()
                for ans in response.xpath("//div[@class='wjcon-2 mb20']")
            ]
            item = dict(  # 放入字典
                ask_title=ask_title,
                ask_content=ask_content,
                ans_list=str(ans_list),
                from_url=response.url,
                from_type=4,
                uid=uid,
                type=url_type,
            )
            yield item

    def parse1(self, response):
        """Extract the numeric view count from the counter response, then emit."""
        item = response.meta["item"]
        match = re.search(r"\d+", response.text)
        if match:
            item['see_num'] = match.group()
        yield item