import scrapy

from pymongo import MongoClient
from scrapy import Selector
from urllib import parse
import re

from scrapy.utils.trackref import NoneType
from zhongyao_yaofang.items import ZhongyaoYaofangItem

try:
    # Connect to MongoDB (localhost:27017); credentials authenticate against
    # the "zhongyao" database (authSource).
    username = "yaocai"
    # Percent-encode the password: it contains '@', which would otherwise
    # break parsing of the mongodb:// URI.
    password = parse.quote("yaocai@admin.123")
    client = MongoClient(f'mongodb://{username}:{password}@127.0.0.1:27017/?directConnection=true&authSource=zhongyao')

    # Select the database (created automatically on first write).
    db = client['zhongyao']

    # Select the collections (also created automatically on first write).
    collection_yaofang = db['yaofang']
    collection_yaofang_category = db['yaofang_category']
    collection_yaofang_category_index = db['yaofang_category_index']
    collection_yaofang_detail = db['yaofang_detail']
except Exception as e:
    # NOTE(review): MongoClient connects lazily, so this branch rarely fires;
    # if it does, the collection_* names above stay undefined and the spider
    # will raise NameError on first use — consider failing fast instead.
    print(f"mongodb error:{e}")

# Site root that category/index relative URLs are joined against.
base_url = "https://www.baicao99.com"




class ZhongyaoYaofangSpiderSpider(scrapy.Spider):
    """Three-stage spider for TCM prescription (yaofang) data.

    The stages are run one at a time by commenting/uncommenting the matching
    sections in start_requests() and parse():

      1. scrape the prescription category list  -> yaofang_category
      2. walk each category's paginated index   -> yaofang_category_index
      3. fetch each prescription's detail page  -> yaofang_detail

    Stage 2 is currently active.
    """

    name = "zhongyao_yaofang_spider"
    # NOTE(review): base_url points at www.baicao99.com, which is not listed
    # here; the requests only get through because every Request sets
    # dont_filter=True (which bypasses the offsite check) — confirm the
    # intended crawl domain.
    allowed_domains = ["www.zysj.com.cn"]
    start_urls = ["https://www.zysj.com.cn"]

    # Number of index pages fetched per category in stage 2 (1..MAX_PAGES).
    MAX_PAGES = 51

    # Kept for backward compatibility; the per-request page number is carried
    # in request.meta, not read from this class attribute.
    page_index = 1

    def start_requests(self):
        """Seed the crawl for the currently-active stage (stage 2).

        Reads category documents from MongoDB and yields one request per
        (category, page) pair, rewriting each category URL into its paginated
        form ``<prefix>_<page>.html``.
        """
        try:
            # ########## Stage 3: prescription detail pages (disabled) ########
            # yaofang_cursor = collection_yaofang_category_index.find().skip(0).limit(5000)
            # for document in yaofang_cursor:
            #     zhongyaoYaofangItem = ZhongyaoYaofangItem(**document)
            #     yaofang_index_url = zhongyaoYaofangItem["yaofang_index_url"]
            #     yield scrapy.Request(url=yaofang_index_url, callback=self.parse,
            #                          meta=zhongyaoYaofangItem, dont_filter=True)

            # ########## Stage 2: paginated category indexes (active) #########
            yaoxing_cursor = collection_yaofang_category.find().skip(0).limit(40)
            for document in yaoxing_cursor:
                item = ZhongyaoYaofangItem(**document)
                category_url = item["category_url"]
                # Drop the trailing "_<n>.html" page suffix; every page of a
                # category shares the remaining prefix.
                prefix = category_url[:category_url.rfind("_")]
                for page in range(1, self.MAX_PAGES + 1):
                    url = f"{base_url}{prefix}_{page}.html"
                    print("url--->", url)
                    item["category_url"] = url
                    item["page_index"] = page
                    # scrapy.Request copies meta into its own dict, so mutating
                    # `item` on the next iteration cannot affect this request.
                    yield scrapy.Request(url=url, callback=self.parse,
                                         meta=item, dont_filter=True)

            # ########## Stage 1: category list (disabled) ####################
            # yield scrapy.Request(url="https://www.baicao99.com/yaofang/",
            #                      callback=self.parse, dont_filter=True)
        except Exception as e:
            # Broad catch keeps one bad document from killing the whole crawl;
            # consider self.logger.exception for real diagnostics.
            print(f"app error:{e}")

    def parse(self, response):
        """Stage-2 callback: parse one paginated category index page.

        For every prescription entry on the page, collects its name, URL and
        the labelled tag lines (功用/组成/运用 mapped to functions/component/use)
        and inserts one document into the yaofang_category_index collection.
        """

        # ############## Stage 3: detail-page parsing (disabled) ##############
        # try:
        #     yaocai_detail = {}
        #     yaocai_detail["name"] = response.meta.get("yaocai_index_name")
        #     yaocai_detail["index_url"] = response.meta.get("index_url")
        #     yaocai_detail["index_belong"] = response.meta.get("index_name")
        #     yaocai_detail["index_belong_part"] = response.meta.get("index_belong")
        #     yaocai_detail["detail_url"] = response.meta.get("yaocai_index_url")
        #     sort = response.meta.get("sort")
        #     yaocai_detail["yaocai_xingzhuang"] = response.meta.get("yaocai_xingzhuang")
        #     yaocai_detail["sort"] = sort
        #     yaocai_detail["yaocai_liaoxiao"] = response.meta.get("yaocai_liaoxiao")
        #     yaocai_detail["yaoxing"] = response.meta.get("yaoxing")
        #     selector = Selector(response)
        #     pub_time = selector.xpath('//*[@id="write"]/table[1]/tbody/tr/td[1]').get()
        #     yaocai_detail["pub_time"] = pub_time
        #
        #     biaotiTable = selector.xpath('//*[@id="write"]/table[1]/tbody/tr/td')
        #     time = ""
        #     lable_name = ""
        #     click_count = ""
        #     for bindx, lable in enumerate(biaotiTable):
        #         if bindx == 0:
        #             time = lable.xpath('text()').get()
        #         elif bindx == 1:
        #             lable_name = lable.xpath('text()').get()
        #         elif bindx == 2:
        #             click_count = lable.xpath('text()').get()
        #         yaocai_detail['pub_time'] = time
        #         yaocai_detail['lable_name'] = lable_name
        #         yaocai_detail['click_count'] = click_count
        #
        #     base_info = selector.xpath('//*[@id="write"]/p/text()')
        #     yaocai_detail["introduce"] = base_info.get()
        #     content_trs = selector.xpath('//*[@id="write"]/table[2]/tbody/tr')
        #     for index, content_tr in enumerate(content_trs):
        #         content_tds = content_tr.xpath('.//td')
        #         key = ''
        #         value = ''
        #         for index_td, content_td in enumerate(content_tds):
        #             if index_td == 0:
        #                 key = content_td.xpath("text()").get()
        #             elif index_td == 1:
        #                 value = content_td.xpath("text()").get()
        #         yaocai_detail[key] = value
        #
        #     print(yaocai_detail)
        #     # collection_yaofang_detail.insert_one(yaocai_detail)
        # except Exception as e:
        #     print(f"################parse error:{e}#############")

        # ############## Stage 2: index parsing (active) ######################
        try:
            print("------------------------------------")
            selector = Selector(response)
            results = selector.xpath('//*[@id="write"]/div[contains(@class,"s_result")]')
            # xpath() always returns a (possibly empty) SelectorList, never
            # None, so test emptiness instead of comparing against NoneType.
            if not results:
                return
            category_name = response.meta.get("category_name")
            category_url = response.meta.get("category_url")
            page_index = response.meta.get("page_index")

            # Chinese tag labels (first two characters) -> document field name.
            field_by_label = {'功用': 'functions', '组成': 'component', '运用': 'use'}

            for index, yaofang_title in enumerate(results):
                yaofang_index = {
                    "category_name": category_name,
                    "category_url": category_url,
                    "page_index": page_index,
                }
                yaofang_index_url = base_url + yaofang_title.xpath(".//a/@href").get()
                yaofang_index_name = yaofang_title.xpath(".//a/h5/text()").get()

                for tag_li in yaofang_title.xpath(".//ul/li"):
                    tag_name = tag_li.xpath(".//span/text()").get()
                    tag_content = tag_li.xpath("text()").get()
                    # Guard: some <li> rows carry no <span> label; .get()
                    # returns None and slicing it would raise TypeError,
                    # aborting the rest of the page via the broad except.
                    if not tag_name:
                        continue
                    field = field_by_label.get(tag_name[0:2])
                    if field is None:
                        continue
                    yaofang_index[field] = tag_content

                yaofang_index["sort"] = index
                yaofang_index["yaofang_index_name"] = yaofang_index_name
                yaofang_index["yaofang_index_url"] = yaofang_index_url
                collection_yaofang_category_index.insert_one(yaofang_index)
                print(yaofang_index)
        except Exception as e:
            print(f"################parse error:{e}#############")

        # ############## Stage 1: category-list parsing (disabled) ############
        # try:
        #     selector = Selector(response)
        #     yf_category_list = selector.xpath('//*[@id="write"]/table/tbody/tr/td[2]/a[contains(@href,"/yaofang")]')
        #     for index, gj in enumerate(yf_category_list):
        #         yaofang_catory_info = {}
        #         yaofang_catory_info["category_name"] = gj.xpath("text()").get()
        #         yaofang_catory_info["category_url"] = gj.xpath("@href").get()
        #         print(yaofang_catory_info)
        #         collection_yaofang_category.insert_one(yaofang_catory_info)
        # except Exception as e:
        #     print(f"################parse error:{e}#############")

    # Earlier experimental callback kept for reference (never scheduled).
    # def execute_find(self, response):
    #     print("----------------execute_find--------------")
    #     try:
    #         # Title extraction for each prescription entry.
    #         selector = Selector(response)
    #         results = selector.xpath('//*[@id="write"]/div')
    #         for index, yaofang_title in enumerate(results):
    #             yaofang_index_url = yaofang_title.xpath(".//a/@href").get()
    #             yaofang_index_title = yaofang_title.xpath(".//a/h5/text()").get()
    #             yaofang_xingzhuang = yaofang_title.xpath(".//ul/li[1]/text()").get()
    #             yaofang_liaoxiao = yaofang_title.xpath(".//ul/li[2]/text()").get()
    #             yaofang_yaoxings = yaofang_title.xpath(".//ul/li[3]/span")
    #             yaoxing_arr = []
    #             for ind, yaoxing in enumerate(yaofang_yaoxings):
    #                 yaoxing_arr.append(yaoxing.xpath("text()").get())
    #
    #         # Follow pagination links found at the bottom of the page.
    #         page_lis = selector.xpath('//*[@id="write"]/ul/li')
    #         for page_index, page in enumerate(page_lis):
    #             page_url = page.xpath(".//a/@href").get()
    #             print(page_index, page)
    #             yield scrapy.Request(url=base_url + page_url, callback=self.parse)
    #     except Exception as e:
    #         print(f"################parse error:{e}#############")

################## mysql ###################################
# 连接到MySQL数据库

# connection = pymysql.connect(host='your_host',    # 通常是localhost或数据库服务器的IP地址
#                              user='your_username',  # 你的MySQL用户名
#                              password='your_password',  # 你的MySQL密码
#                              database='your_database',  # 要连接的数据库名
#                              cursorclass=pymysql.cursors.DictCursor)  # 使用DictCursor获取字典格式的结果集
#
# try:
#     with connection.cursor() as cursor:
#         # 执行SQL查询使用execute()方法
#         sql = "SELECT * FROM your_table"
#         cursor.execute(sql)
#         result = cursor.fetchall()  # 获取所有记录列表，使用fetchall()方法获取所有记录，或者使用fetchone()获取单条记录，fetchmany(size)获取多条记录。
#         for row in result:
#             print(row)  # 打印每条记录的内容。每条记录是一个字典。
# finally:
#     connection.close()  # close the database connection
