import re
from sys import exc_info
from types import NoneType

import scrapy
from scrapy import Selector
import logging

from zhongyaoMedical.items import ZhongyaomedicalItem


from pymongo import MongoClient

# Connect to MongoDB (localhost, default port 27017).
# NOTE(review): credentials are hardcoded in source — consider moving them to
# Scrapy settings or environment variables.
client = MongoClient('mongodb://erp:erp123@localhost:27017', maxPoolSize=10,
                     minPoolSize=3,
                     maxConnecting=30,
                     socketTimeoutMS=100000,
                     maxIdleTimeMS=60000,
                     connectTimeoutMS=40000,
                     waitQueueTimeoutMS=10000)

# Database holding both the crawl index and the scraped detail documents.
db = client['erp']

# collection = db['zhongyaomedical_page']
# collection_index = db['zhongyaomedical_index']

# `collection` is the source of index records (URLs to crawl);
# `collection_detail` is the sink for parsed detail pages.
collection = db['zhongyaomedical_index']
collection_detail = db['zhongyaomedical_detail']

class ZhongyaomedicalspiderSpider(scrapy.Spider):
    """Spider for case-study ("yianxinde") pages on www.zysj.com.cn.

    Index records (detail-page URL plus metadata) are read from the
    module-level ``collection`` (``zhongyaomedical_index``); each detail page
    is parsed by :meth:`parse` and the result inserted into
    ``collection_detail`` (``zhongyaomedical_detail``).
    """

    name = "zhongyaoMedicalSpider"
    allowed_domains = ["www.zysj.com.cn"]
    start_urls = ["https://www.zysj.com.cn"]
    base_url = "https://www.zysj.com.cn/yianxinde/index.html"

    def start_requests(self):
        """Yield one request per stored index record, ordered by page_index."""
        try:
            # sort(key, direction) is portable across pymongo versions;
            # the original passed a dict, which requires pymongo >= 4.7.
            medical_cursor = collection.find({}).sort("page_index", 1)
            for document in medical_cursor:
                item = ZhongyaomedicalItem(**document)
                yield scrapy.Request(url=item["index_url"],
                                     callback=self.parse,
                                     meta=item)
        except Exception as e:
            print(f"app error:{e}")

    def parse(self, response):
        """Extract one detail page and insert the record into Mongo.

        Metadata carried over from the index record is copied into the
        document; the "information" panel is flattened into key/value pairs
        and the article body joined into a single ``content`` string.
        On a partial failure the record is still inserted, tagged with an
        ``error`` code (1 = information panel, 2 = content body).
        """
        medical_detail = {
            "title": response.meta.get("title"),
            "pub_time": response.meta.get("pub_time"),
            "detail_url": response.meta.get("index_url"),
            "parent_url": response.meta.get("page_url"),
            "page_index": response.meta.get("page_index"),
            "h1_title": response.xpath('//*[@id="main"]/h1/text()').get(),
        }

        for information in response.xpath('//*[@id="information"]/div'):
            try:
                clazz = information.xpath("@class").get()
                if clazz:
                    clazz = re.sub("item ", "", clazz)
                for info in information.xpath(".//div"):
                    infoc = info.xpath("@class").get()
                    # BUG FIX: the original compared against the NoneType
                    # *class* (`is not NoneType`), which is always true.
                    if infoc is not None:
                        infoc = clazz + re.sub("item-", "_", infoc)
                        if infoc in ("summary_content", "excerpt_content"):
                            # Prefer paragraph text; fall back to bare text
                            # nodes when no <p> matched.  The original used
                            # `__sizeof__() == 40` as an empty-list heuristic.
                            texts = info.xpath(".//p/text()")
                            if not texts:
                                texts = info.xpath("text()")
                            infoct = texts.get()
                        else:
                            infoct = info.xpath("text()").get()
                        # .get() may return None — guard before re.sub,
                        # which raised TypeError in the original.
                        infoct = re.sub("\n", "", infoct) if infoct else ""
                        medical_detail[infoc] = infoct
            except Exception as e:
                # BUG FIX: the original passed an extra positional arg to a
                # message with no %s placeholder, breaking log formatting.
                logging.error("app1111111 error:%s url:%s", e,
                              response.meta.get("index_url"), exc_info=True)
                medical_detail["error"] = 1

        try:
            # BUG FIX: the original appended SelectorList objects and then
            # "\n".join()-ed them, raising TypeError whenever content
            # existed; extract the text with .get() first.
            paragraphs = []
            for content in response.xpath('//*[@id="content"]/p'):
                text = content.xpath("text()").get()
                if text:
                    paragraphs.append(text)
            medical_detail["content"] = "\n".join(paragraphs)
        except Exception as e:
            logging.error("app222222222222 error:%s url:%s", e,
                          response.meta.get("index_url"), exc_info=True)
            medical_detail["error"] = 2

        collection_detail.insert_one(medical_detail)

    def zyMedicalStartRequest(self):
        """Alternative request generator over page-level index records
        (documents carrying ``page_url``); kept for the list-page crawl."""
        try:
            for document in collection.find({}):
                item = ZhongyaomedicalItem(**document)
                yield scrapy.Request(url=item["page_url"],
                                     callback=self.parse,
                                     meta=item)
        except Exception as e:
            print(f"app error:{e}")

    def ZhongyaoMedicalScrapy(self, response):
        """Parse a list page and store one index record per list item.

        BUG FIX: the original inserted via ``collection_index``, whose
        definition is commented out at module level, so every call raised
        a (swallowed) NameError.  The live ``collection`` handle points at
        the same ``zhongyaomedical_index`` collection, so use it.
        """
        try:
            page_url = response.meta.get("page_url")
            page_index = response.meta.get("index")

            selector = Selector(response)
            print("--------------------------")

            lis = selector.xpath('//*[@id="list-content"]/ul/li')
            for index, li in enumerate(lis):
                href = li.xpath(".//a/@href").get()
                title = li.xpath(".//a/text()").get()
                # Join all <span> texts (publication date parts) into one
                # string; renamed from `time` to avoid shadowing the module.
                pub_time = ",".join(li.xpath(".//span/text()").getall())
                print(page_index, index, href, title, pub_time)
                collection.insert_one({
                    "index_url": "https://www.zysj.com.cn" + href,
                    "page_url": page_url,
                    "page_index": page_index,
                    "title": title,
                    "pub_time": pub_time,
                })
                # yield item

            # pages = response.xpath('//*[@id="page"]/ul/li/a[contains(@href,"/yianxinde")]')
            # for pdex,pageE in enumerate(pages):
            #     page_url = pageE.xpath("@href").get()
            #     request_url = "https://www.zysj.com.cn"+page_url
            #     collection.insert_one({"index":str(pdex+2),"page_url":request_url})

        except Exception as e:
            print(f"app error:{e}")