import scrapy

from pymongo import MongoClient
from scrapy import Selector
from urllib import parse

from scrapy.utils.trackref import NoneType
from zhongyao_yaocai.items import ZhongyaoYaocaiItem

try:
    # Connect to MongoDB (default localhost:27017).
    # NOTE(review): credentials are hard-coded in source; consider moving
    # them to Scrapy settings or environment variables.
    username = "yaocai"
    password = parse.quote("yaocai@admin.123")  # percent-encode '@' so the URI parses correctly
    client = MongoClient(f'mongodb://{username}:{password}@127.0.0.1:27017/?directConnection=true&authSource=zhongyao')

    # Select the database (created automatically on first write if missing).
    db = client['zhongyao']

    # Select the collections (also created automatically if missing).
    collection = db['yaocai_yaoxing']        # phase 1: medicinal-property index (see commented code below)
    # collection_yaoxing = db['yaocai_yaoxing']
    collection_index = db['yaocai_index']    # phase 2: per-property material listings
    # collection_detail = db['yaocai_detail']
    collection_detail = db['yaocai_detail2'] # phase 3: material detail records (active run)
except Exception as e:
    # NOTE(review): on failure this only prints -- the collection names stay
    # undefined and the spider later raises NameError. Confirm this
    # fail-and-continue behavior is intended.
    print(f"mongodb error:{e}")

# Site root, used to build absolute URLs from relative hrefs.
base_url = "https://www.baicao99.com"


class ZhongyaoYaocaiSpiderSpider(scrapy.Spider):
    """Spider that crawls Chinese herbal-medicine (yaocai) detail pages from
    baicao99.com and stores the parsed records in MongoDB.

    The overall crawl was run in three phases against the module-level
    collections:
      1. medicinal-property index pages -> ``collection``
      2. per-property material listings -> ``collection_index``
      3. material detail pages          -> ``collection_detail``
    Only phase 3 is active here; the one-off code for phases 1 and 2
    (previously kept as commented-out blocks) has been removed.
    """

    name = "zhongyao_yaocai_spider"
    allowed_domains = ["www.baicao99.com"]
    start_urls = ["https://www.baicao99.com/yaocai/"]
    page_index = 1

    def start_requests(self):
        """Yield one detail-page request per material in the index collection.

        Processes a resumable 5000-document slice (adjust ``skip`` between
        runs). Each request carries the source index document along in
        ``meta`` so parse() can merge it into the detail record.
        """
        try:
            # A list of (key, direction) pairs is the portable pymongo sort
            # form; the dict form only works on recent pymongo versions and
            # is otherwise equivalent here.
            cursor = (
                collection_index.find()
                .sort([("index_name", 1), ("yaocai_index_name", 1), ("sort", 1)])
                .skip(25000)
                .limit(5000)
            )
            sort = 0
            for document in cursor:
                # NOTE(review): assumes every stored key (including Mongo's
                # _id) is a declared item field -- verify, otherwise this
                # raises KeyError for the unknown field.
                item = ZhongyaoYaocaiItem(**document)
                detail_url = item["yaocai_index_url"]
                sort += 1
                print("sort", sort)
                item['sort'] = sort
                # dont_filter: detail URLs may legitimately repeat.
                yield scrapy.Request(url=detail_url, callback=self.parse,
                                     meta=item, dont_filter=True)
        except Exception as e:
            print(f"app error:{e}")

    def parse(self, response):
        """Parse one material detail page and insert it into
        ``collection_detail``.

        The record combines metadata forwarded via ``response.meta`` with
        the page's header table, introduction paragraph, and the key/value
        attribute table.
        """
        try:
            # Metadata carried over from the index document.
            yaocai_detail = {
                "name": response.meta.get("yaocai_index_name"),
                "index_url": response.meta.get("index_url"),
                "index_belong": response.meta.get("index_name"),
                "index_belong_part": response.meta.get("index_belong"),
                "detail_url": response.meta.get("yaocai_index_url"),
                "yaocai_xingzhuang": response.meta.get("yaocai_xingzhuang"),
                "yaocai_liaoxiao": response.meta.get("yaocai_liaoxiao"),
                "yaoxing": response.meta.get("yaoxing"),
            }
            sort = response.meta.get("sort")
            yaocai_detail["sort"] = sort

            selector = Selector(response)
            header_cells = selector.xpath('//*[@id="write"]/table[1]/tbody/tr/td')

            # BUGFIX: the original tested ``header_cells is NoneType``,
            # comparing a SelectorList against the *type object* -- always
            # False. An empty SelectorList is falsy, so truthiness is the
            # correct "nothing scraped" test.
            if not header_cells:
                print(sort, response.meta.get("yaocai_index_name"), "没爬取到数据")
            else:
                print(sort, response.meta.get("yaocai_index_name"), "开始爬取数据")

            # Header row cells, in order: publish time, label, click count.
            pub_time = ""
            lable_name = ""
            click_count = ""
            for idx, cell in enumerate(header_cells):
                if idx == 0:
                    pub_time = cell.xpath('text()').get()
                elif idx == 1:
                    lable_name = cell.xpath('text()').get()
                elif idx == 2:
                    click_count = cell.xpath('text()').get()
            # Hoisted out of the loop: these only need storing once.
            yaocai_detail['pub_time'] = pub_time
            yaocai_detail['lable_name'] = lable_name
            yaocai_detail['click_count'] = click_count

            # Introduction paragraph.
            yaocai_detail["introduce"] = selector.xpath('//*[@id="write"]/p/text()').get()

            # Attribute table: each row holds a (key, value) pair of <td>s.
            for content_tr in selector.xpath('//*[@id="write"]/table[2]/tbody/tr'):
                key = ''
                value = ''
                for index_td, content_td in enumerate(content_tr.xpath('.//td')):
                    if index_td == 0:
                        key = content_td.xpath("text()").get()
                    elif index_td == 1:
                        value = content_td.xpath("text()").get()
                # Guard: a <td> with no text yields key=None, and MongoDB
                # rejects None document keys (previously the broad except
                # silently dropped the whole record in that case).
                if key is not None:
                    yaocai_detail[key] = value

            collection_detail.insert_one(yaocai_detail)
        except Exception as e:
            print(f"################parse error:{e}#############")

################## mysql ###################################
# 连接到MySQL数据库

# connection = pymysql.connect(host='your_host',    # 通常是localhost或数据库服务器的IP地址
#                              user='your_username',  # 你的MySQL用户名
#                              password='your_password',  # 你的MySQL密码
#                              database='your_database',  # 要连接的数据库名
#                              cursorclass=pymysql.cursors.DictCursor)  # 使用DictCursor获取字典格式的结果集
#
# try:
#     with connection.cursor() as cursor:
#         # 执行SQL查询使用execute()方法
#         sql = "SELECT * FROM your_table"
#         cursor.execute(sql)
#         result = cursor.fetchall()  # 获取所有记录列表，使用fetchall()方法获取所有记录，或者使用fetchone()获取单条记录，fetchmany(size)获取多条记录。
#         for row in result:
#             print(row)  # 打印每条记录的内容。每条记录是一个字典。
# finally:
#     connection.close()  # 关闭数据库连接
