import scrapy


import re
from sys import exc_info
from types import NoneType
from scrapy import Selector
import logging

from zhongyaoMiscellany.items import ZhongyaomiscellanyItem


from pymongo import MongoClient

# Connect to MongoDB (local instance, default port 27017).
# NOTE(review): credentials are hard-coded in the URI — move them to
# configuration / environment variables before sharing this file.
client = MongoClient('mongodb://erp:erp123@localhost:27017', maxPoolSize=10,
                     minPoolSize=3,
                     maxConnecting=30,
                     socketTimeoutMS=100000,
                     maxIdleTimeMS=60000,
                     connectTimeoutMS=40000,
                     waitQueueTimeoutMS=10000)

db = client['erp']

# Collections used by earlier crawl stages (page / index discovery):
# collection = db['zhongyao_miscellany_page']
# collection_index = db['zhongyao_miscellany_index']

# Source of crawl targets: one index record per article (read in start_requests).
collection = db['zhongyao_miscellany_index']
# Destination for parsed article details (written at the end of parse()).
collection_detail = db['zhongyao_miscellany_detail']


class ZhongyaomiscellanyspiderSpider(scrapy.Spider):
    """Crawl Chinese-medicine miscellany ("zaji") articles from www.zysj.com.cn.

    The crawl historically ran in three manually-toggled stages:

      1. page crawl  -- discover the paginated listing pages;
      2. index crawl -- store one index record per article in the
         ``zhongyao_miscellany_index`` collection;
      3. detail crawl (the code active below) -- fetch each article's
         detail page and persist the parsed fields into
         ``zhongyao_miscellany_detail``.

    The commented-out stage-1/stage-2 implementations have been removed
    here; they are recoverable from version control if needed.
    """

    name = "zhongyaoMiscellanySpider"
    allowed_domains = ["www.zysj.com.cn"]
    start_urls = ["https://www.zysj.com.cn/zaji/index.html"]
    base_url = "https://www.zysj.com.cn/zaji/index.html"

    def start_requests(self):
        # Stage 3 (detail crawl): replay the stored index records and
        # request each article's detail page.  skip()/limit() form a manual
        # resume window adjusted by hand between runs.
        try:
            cursor = (
                collection.find({})
                .skip(50000)
                .limit(7000)
                # BUG FIX: pass (key, direction) explicitly -- the dict form
                # ``sort({"pub_time": -1})`` is only accepted by recent
                # pymongo versions and raises TypeError on older ones.
                .sort("pub_time", -1)
            )
            for document in cursor:
                item = ZhongyaomiscellanyItem(**document)
                yield scrapy.Request(
                    url=item["index_url"],
                    callback=self.parse,
                    # Request.meta must be a plain dict; convert explicitly.
                    meta=dict(item),
                )
        except Exception as e:
            # Log with traceback instead of print so the failure reaches
            # the crawl log.
            logging.error("start_requests error: %s", e, exc_info=True)

    def parse(self, response):
        """Parse one article detail page and insert the result into MongoDB.

        The record is always inserted; partial failures are flagged via an
        ``error`` field (1 = a single info field failed, 2 = the whole
        page-level parse failed).
        """
        miscellany_detail = {}
        try:
            # Carry the index metadata through from start_requests().
            miscellany_detail["title"] = response.meta.get("title")
            miscellany_detail["pub_time"] = response.meta.get("pub_time")
            miscellany_detail["detail_url"] = response.meta.get("index_url")
            miscellany_detail["parent_url"] = response.meta.get("page_url")
            miscellany_detail["page_index"] = response.meta.get("page_index")

            miscellany_detail["h1_title"] = response.xpath(
                '//*[@id="main"]/h1/text()').get()

            # Each <div> under #information holds one labelled field, e.g. an
            # outer class "item summary" wrapping inner "item-content" divs.
            # The resulting Mongo key is "<outer>_<inner>", e.g.
            # "summary_content".
            for infomation in response.xpath('//*[@id="information"]/div'):
                try:
                    clazz = infomation.xpath("@class").get()
                    if clazz:
                        clazz = re.sub(r"item ", "", clazz)
                    for info in infomation.xpath(".//div"):
                        infoc = info.xpath("@class").get()
                        infoct = ""
                        # BUG FIX: the original tested ``is not NoneType``
                        # (comparing against the *type* object), which is true
                        # for every value including None, so missing class
                        # attributes crashed re.sub() below.
                        if infoc is not None:
                            infoc = clazz + re.sub(r"item-", "_", infoc)
                            if infoc in ("summary_content", "excerpt_content"):
                                selected = info.xpath("text()")
                                # BUG FIX: an empty SelectorList was detected
                                # via ``__sizeof__() == 40`` (an allocation-
                                # size proxy); use its truthiness instead and
                                # fall back to the <p> children's text.
                                if not selected:
                                    selected = info.xpath(".//p/text()")
                                infoct = selected.get()
                            else:
                                infoct = info.xpath("text()").get()
                            # Guard: .get() returns None for empty nodes.
                            if infoct is not None:
                                infoct = re.sub(r"\n", "", infoct)
                        miscellany_detail[infoc] = infoct
                except Exception as e:
                    # BUG FIX: logging.error() takes lazy %-style args; the
                    # original passed an extra positional arg with no
                    # placeholder, which breaks log-record formatting.
                    logging.error(
                        "info-field parse error on %s: %s",
                        response.meta.get("index_url"), e, exc_info=True)
                    miscellany_detail["error"] = 1

            # Article body: join the non-empty <p> text nodes.
            paragraphs = []
            for paragraph in response.xpath('//*[@id="content"]/p'):
                selected = paragraph.xpath("text()")
                # BUG FIX: same empty-SelectorList check as above (the
                # original's ``is NoneType`` / ``__sizeof__() == 40`` test).
                if not selected:
                    continue
                paragraphs.append(selected.get())
            miscellany_detail["content"] = "\n".join(paragraphs)

        except Exception as e:
            miscellany_detail["error"] = 2
            print(f"app222222222222 error:{e}", response.meta.get('index_url'))

        # Insert even on failure so the error flag is recorded for later retry.
        collection_detail.insert_one(miscellany_detail)
        print("success", miscellany_detail)

