# -*- coding:utf-8 -*-

import requests
from lxml import etree
from fake_useragent import UserAgent
from http import cookiejar
import json
from datetime import datetime
import time
import re
import csv
import os
from pymongo import MongoClient


requests.packages.urllib3.disable_warnings()  # Suppress urllib3 InsecureRequestWarning for HTTPS requests made without certificate verification

"""
国家标准物质资源共享平台
https://www.ncrm.org.cn/

国内资源
"""


class TestRequest():
    """Scraper for the National Reference Material Resource Sharing Platform
    (https://www.ncrm.org.cn/), covering domestic resources.

    NOTE(review): this class calls ``HttpUtils.do_request`` and
    ``TestRequest.ditc_to_csv``, neither of which is defined or imported in
    this module — confirm they are provided elsewhere before running.
    """

    def __init__(self):
        # Only the User-Agent is actually sent; the remaining headers were
        # captured from a real browser session and are kept for reference.
        self.headers = {
            # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
            # 'Accept-Encoding': 'gzip, deflate, br',
            # 'Accept-Language': 'zh-CN,zh;q=0.9',
            # 'Connection': 'keep-alive',
            # 'Cookie': 'acw_tc=2760821b16158177455973203e6fe36df051e7e0e53182a0c3f7021072d6bd; ASP.NET_SessionId=iqa3feoskoxoprqfm2eounjv; _pk_testcookie.49.d1ae=1; _pk_ses.49.d1ae=1; _pk_id.49.d1ae=085a946ad27c8c2d.1615817767.1.1615817824.1615817767.',
            # 'DNT': '1',
            # 'Host': 'www.ncrm.org.cn',
            # 'Referer': 'https://www.ncrm.org.cn/Web/Material/HomeList?term=&pageIndex=2',
            # 'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
            # 'sec-ch-ua-mobile': '?0',
            # 'Sec-Fetch-Dest': 'document',
            # 'Sec-Fetch-Mode': 'navigate',
            # 'Sec-Fetch-Site': 'same-origin',
            # 'Sec-Fetch-User': '?1',
            # 'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36'
        }

    # Extract items page-by-page from a fixed category (lingyu 112/113)
    def get_contents(self, cursor):
        """Fetch one category-listing page and scrape every linked detail page.

        :param cursor: 1-based page index of the category listing.
        """
        post_data = {
            'lingyu1': '112',
            'lingyu2': '113',
            'jibie': '',
            'term': '',
            'selectedLiangzhis': '[]',
            'lingyuAutoID': '113',
            'pageIndex': cursor,
        }
        # Plain string — the original used an f-string with no placeholders.
        url = "https://www.ncrm.org.cn/Web/Ordering/Material"
        # NOTE(review): "GET" combined with a form payload is unusual; verify
        # that HttpUtils.do_request sends post_data as query parameters.
        html = HttpUtils.do_request("GET", url, self.headers, post_data)

        root = etree.HTML(html.text)

        for link in root.xpath('//p[@class="fontweight"]/a'):
            url_child = "https://www.ncrm.org.cn" + link.xpath('./@href')[0]
            self.get_detail(url_child)

    # Extract items from the site-wide search listing
    def get_search(self, cursor):
        """Fetch one search-result page and write each row's summary columns
        to CSV via the external ``ditc_to_csv`` helper.

        :param cursor: 1-based page index of the search listing.
        """
        url = f"https://www.ncrm.org.cn/Web/Material/HomeList?term=&pageIndex={cursor}"
        html = HttpUtils.do_request("GET", url, self.headers, "")

        root = etree.HTML(html.text)

        for row in root.xpath('//tbody/tr'):
            # Skip header/filler rows that carry no item link.
            if not row.xpath('./td[1]/span/a/text()'):
                continue
            dict_data = {
                '编号': "".join(row.xpath('./td[1]/span/a/text()')),
                '中文名称': "".join(row.xpath('./td[2]//text()')),
                '英文名称': "".join(row.xpath('./td[3]//text()')),
                '规格': "".join(row.xpath('./td[4]//text()')),
            }
            url_child = "https://www.ncrm.org.cn" + row.xpath('./td[1]/span/a/@href')[0]

            TestRequest.ditc_to_csv("首页信息", dict_data)

            # self.get_detail(url_child)

    def get_detail(self, url_child):
        """Scrape one material's detail page and append one CSV row per row
        of its measurement ("edit-table") table.

        :param url_child: absolute URL of the material detail page.
        """
        html_child = HttpUtils.do_request("GET", url_child, self.headers, "")
        root_child = etree.HTML(html_child.text)

        titles = root_child.xpath('//h5[@class="text_overflow_two"]/a/text()')
        if not titles:
            return  # page has no recognizable title block — nothing to scrape

        dict_data = dict()
        dict_data['标题'] = titles[0]
        dict_data['标题1'] = "".join(root_child.xpath('//h5[@class="text_overflow_two"]/text()')).strip()
        dict_data['标题2'] = "".join(root_child.xpath('//div[@class="jiage-item"]/span/text()')).replace('\xa0', '')
        dict_data['价格'] = "".join(root_child.xpath('//h4[@class="orange"]/text()'))

        # Price/availability table — present only for platform-shared items.
        element_table1 = root_child.xpath('//div[@data-index="0" and @data-jiage-content="true"]/table[@class="table-biaowu"]')
        if element_table1:
            dict_data['状态'] = "".join(element_table1[0].xpath('./tbody/tr[1]/td[2]/text()'))
            dict_data['物流'] = "".join(element_table1[0].xpath('./tbody/tr[2]/td[2]/text()'))
            dict_data['说明'] = "".join(element_table1[0].xpath('./tbody/tr[3]/td[2]/text()')).replace('\xa0', '')
            dict_data['有效期'] = "".join(element_table1[0].xpath('./tbody/tr[4]/td[2]/text()'))
        else:
            dict_data['状态'] = "该标物未通过平台共享，请直接与研制单位联系。"
            dict_data['物流'] = ""
            dict_data['说明'] = ""
            dict_data['有效期'] = ""

        # Attribute table. Guard the lookup instead of indexing [0]
        # unconditionally (the original raised IndexError on pages that lack
        # this table).
        tables = root_child.xpath('//div[@data-index="0" and @data-tab="true"]/table[@class="table-biaowu"]')
        if not tables:
            return
        element_table2 = tables[0]
        dict_data['英文名称'] = "".join(element_table2.xpath('./tbody/tr[1]/td[2]/text()'))
        dict_data['应用领域'] = "".join(element_table2.xpath('./tbody/tr[4]/td[2]/text()'))
        dict_data['保存条件'] = "".join(element_table2.xpath('./tbody/tr[5]/td[2]/text()'))
        dict_data['使用注意事项'] = "".join(element_table2.xpath('./tbody/tr[6]/td[2]/text()'))
        dict_data['特征形态'] = "".join(element_table2.xpath('./tbody/tr[7]/td[2]/text()'))
        dict_data['基体'] = "".join(element_table2.xpath('./tbody/tr[8]/td[2]/text()'))
        dict_data['主要分析方法'] = "".join(element_table2.xpath('./tbody/tr[9]/td[2]/text()'))
        dict_data['定值单位'] = "".join(element_table2.xpath('./tbody/tr[10]/td[2]/text()'))
        dict_data['规格'] = "".join(element_table2.xpath('./tbody/tr[11]/td[2]/text()'))

        # '应用领域' looks like "大类/小类"; tolerate a value with no '/'
        # (the original raised IndexError on split('/')[1] in that case).
        if len(dict_data['应用领域']) > 0:
            parts = dict_data['应用领域'].split('/')
            dict_data['领域大类'] = parts[0]
            dict_data['领域小类'] = parts[1] if len(parts) > 1 else ''

        # Measurement table: collect the column headers once...
        colnamelist = ["".join(th.xpath('./text()'))
                       for th in root_child.xpath('//table[@class="edit-table"]/thead/tr/th')]
        # ...then emit one CSV record per data row, merging "header value"
        # pairs into numbered 表格列N keys.
        for e in root_child.xpath('//table[@class="edit-table"]/tbody/tr'):
            for index in range(len(colnamelist)):
                colvalue = "".join(e.xpath(f'./td[{index + 1}]//text()'))
                dict_data['表格列' + str(index + 1)] = colnamelist[index] + " " + colvalue
            TestRequest.ditc_to_csv("测试", dict_data)

    # def insertItem(self, tableName, data):
    #     my_set = db[tableName]
    #     my_set.insert_one(data)


if __name__ == '__main__':
    # Optional MongoDB sink, disabled for now.
    # conn = MongoClient('127.0.0.1', 27017)
    # db = conn["Test"]
    crawler = TestRequest()

    # Category-tag crawl (disabled).
    # for i in range(1, 2):
    #     print(f"============第【{i}】页=============")
    #     test.get_contents(i)

    # Search-listing crawl: pages 1 through 1382 inclusive.
    for page in range(1, 1383):
        print(f"============第【{page}】页=============")
        crawler.get_search(page)
