# -*- coding: utf-8 -*-
# Author        :Jie Shen
# CreateTime    :2021/12/27 10:03
import time
import pandas as pd
from selenium import webdriver
import os
from pathlib import Path
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import re


class Qcc:
    """Scrape company listings for one province from qcc.com via Selenium.

    Workflow: log in by scanning a QR code, open the advanced-search panel,
    tick the requested province checkbox, then walk the result pages and dump
    each page's rows to ``<province>/<page>.csv`` (GBK-encoded).
    """

    def __init__(self, Provience, dirverPath):
        # NOTE(review): parameter spellings ("Provience", "dirverPath") are
        # kept as-is for backward compatibility with existing callers.
        self.Provience = Provience
        self.driverPath = dirverPath
        # CSV columns: company name, tags, legal representative, registered
        # capital, founding date, phone, email, website, address.
        self.Header = ["公司名", "标签", "法定代表人", "注册资本", "成立日期", "电话", "邮箱", "官网", "地址"]
        self.startPage = self.setInit()
        # Province order mirrors the site's advanced-search checkbox list; the
        # (1-based) position selects the matching nth-child checkbox.
        self.provienceList = ["广东", "北京", "江苏", "上海", "浙江", "四川", "山东",
                              "河北", "河南", "福建", "湖北", "安徽", "重庆", "陕西", "湖南",
                              "辽宁", "天津", "江西", "云南", "广西", "山西", "贵州", "吉林",
                              "黑龙江", "内蒙古", "新疆", "甘肃", "海南", "香港", "宁夏", "青海",
                              "台湾", "西藏", "澳门"]

    def setInit(self):
        """Ask whether to start a fresh run ("1") or resume an old one ("0").

        Returns the starting page number: 1 for a fresh run, otherwise the
        page the user types in.

        Bug fix: the original spun forever on invalid input because the
        validation loop never re-prompted; now it re-reads until valid.
        """
        Sign = input("执行新的程序输入1；继续旧程序输入0。\n>")
        while Sign not in ("1", "0"):
            Sign = input("执行新的程序输入1；继续旧程序输入0。\n>")
        if Sign == "0":
            return int(input("输入起始页面数>"))
        return 1

    def generateProvinceSelector(self):
        """Build the CSS selector for this province's checkbox.

        The advanced-search panel lists provinces in the same order as
        ``self.provienceList``; ``nth-child`` is 1-based.
        """
        Index = str(self.provienceList.index(self.Provience) + 1)
        template = ("#adsearchRow > div.adsearch-main > div > div.npanel-body > "
                    "div:nth-child(5) > div.pills-after > span > div > div > "
                    "div:nth-child(1) > div > ul > li:nth-child({Li}) > a > label > "
                    "input[type=checkbox]")
        return template.format(Li=Index)

    def Main(self):
        """Drive the full scrape: login, province filter, then the page loop."""
        self.driver = webdriver.Chrome(self.driverPath)
        self.driver.implicitly_wait(1)  # implicit wait for element lookups
        self.driver.get("https://www.qcc.com/")
        self.driver.maximize_window()
        # Open the login entry and wait for the user to scan the QR code.
        self.driver.find_element_by_css_selector(".lpan>li:nth-child(10)>a").click()
        input("扫码完毕?")
        self.driver.get("https://www.qcc.com/")
        self.driver.find_element_by_css_selector("#indexSearchForm > a.adsearch-btn").click()
        time.sleep(1)

        # Hover over "all regions" so the page renders the province list.
        Hover = "#adsearchRow > div.adsearch-main > div > div.npanel-body > div:nth-child(5) > div.pills-after > span > span > span:nth-child(1)"
        element_to_hover_over = self.driver.find_element_by_css_selector(Hover)
        ActionChains(self.driver).move_to_element(element_to_hover_over).perform()
        time.sleep(2)

        # Hover over the first province entry to keep the dropdown open.
        Start = "#adsearchRow > div.adsearch-main > div > div.npanel-body > div:nth-child(5) > div.pills-after > span > div > div > div:nth-child(1) > div > ul > li:nth-child(1) > a > label > span"
        element_to_hover_over = self.driver.find_element_by_css_selector(Start)
        ActionChains(self.driver).move_to_element(element_to_hover_over).perform()
        time.sleep(2)

        # Scroll the dropdown so the target province row is visible.
        # Rows hold 8 provinces and are ~270px tall — TODO confirm against
        # the live page layout.
        Index = self.provienceList.index(self.Provience)
        Offset = int(Index / 8) * 270
        # Bug fix: the original chained .format() onto an f-string — the
        # substitution had already happened, so .format() was a dead no-op.
        js = f'document.getElementsByClassName("nmulti-drop-col")[3].scrollTop={Offset}'
        self.driver.execute_script(js)
        time.sleep(1)

        # Tick the province checkbox (SPACE toggles it).
        provienceSelector = self.generateProvinceSelector()
        self.driver.find_element_by_css_selector(provienceSelector).send_keys(Keys.SPACE)
        time.sleep(1)

        # Confirm, jumping to the filtered result page for the province.
        OK = "#adsearchBottomContent > div > a.btn.btn-primary"
        self.driver.find_element_by_css_selector(OK).click()

        self.endPage = int(input("输入结尾的页面数>"))
        # Extract and save every requested page. (The original made one extra
        # Extract_info() call after the loop and discarded the result.)
        for i in range(self.startPage, self.endPage + 1):
            time.sleep(1)
            self.jumpPage(i)

    def jumpPage(self, page):
        """Navigate to *page* (page 1 is already shown), scrape it, save CSV.

        Writes ``<province>/<page>.csv`` in GBK encoding.
        """
        if page != 1:
            # Paging via the URL is more reliable than the on-page jump box.
            Url = "https://www.qcc.com/search_adsearchmultilist?p="
            self.driver.get(Url + str(page))
            print("第%d页" % page)

        chapterList = self.Extract_info()

        # One folder per province, one CSV per page.
        df = pd.DataFrame(chapterList)
        folder = Path(self.Provience)
        folder.mkdir(exist_ok=True)
        fileName = os.path.join(self.Provience, str(page) + ".csv")
        df.to_csv(fileName, encoding="gbk", index=None)

    def getStr(self, aimStr, totalStr):
        """Extract the value after ``aimStr：`` from *totalStr*.

        Returns the first whitespace-delimited token after the full-width
        colon, or '' when the label is absent.
        """
        aimRes = re.findall(aimStr + "：.+", totalStr)
        if len(aimRes) == 0:
            return ''
        aimRes = aimRes[0].split('：')[1].strip()
        if ' ' in aimRes:
            # Keep only the first token; trailing text belongs to other fields.
            aimRes = aimRes.split(' ')[0].strip()
        return aimRes

    def getPhoneWeb(self, aimStr, totalStr):
        """Extract a phone/website value that sits on the line AFTER its label.

        Returns the first whitespace-delimited token of the second line, or ''
        when the label is absent.
        """
        if aimStr not in totalStr:
            return ''
        lines = totalStr.split('\n')
        if len(lines) <= 1:
            # Label present but no value line — report and fall through
            # (original behavior raises IndexError below in this case).
            print("存在错误:", totalStr)
        totalStr = lines[1].strip()
        if ' ' in totalStr:
            return totalStr.split(' ')[0].strip()
        return totalStr

    def Extract_info(self):
        """Scrape every company row on the current result page.

        :return: list of ``pd.Series`` (one per company), indexed by
                 ``self.Header``.

        Bug fix: the per-company fields are now reset for every row. The
        original initialized them once before the loop, so a company missing
        e.g. a phone number silently inherited the previous company's value.
        """
        Tr_Xpath = "//*[@id='searchlist']/table/tbody/tr"
        Tr = self.driver.find_elements_by_xpath(Tr_Xpath)
        chapterList = []
        for tr in Tr:
            legalPerson = registeredCapital = registeredDate = ""
            phoneNumber = email = webSite = address = ""

            companyName = tr.find_element_by_xpath("./td/a").text
            desc = tr.find_elements_by_xpath("./td/div[@class='search-tags']/span")
            label = " ".join(des.text for des in desc)

            MTXS = tr.find_elements_by_xpath("./td/p[@class='m-t-xs']")
            for mtxs in MTXS:
                txt = mtxs.text
                if "法定代表人" in txt:
                    legalPerson = self.getStr("法定代表人", txt)
                if "注册资本" in txt:
                    registeredCapital = self.getStr("注册资本", txt)
                if "成立日期" in txt:
                    registeredDate = self.getStr("成立日期", txt)
                # A literal "电话： -" means no phone is listed (same-line
                # form); otherwise the number sits on the line after the label.
                if "电话： -" in txt:
                    phoneNumber = self.getStr("电话", txt)
                elif "电话" in txt:
                    phoneNumber = self.getPhoneWeb("电话", txt)
                if "邮箱" in txt:
                    email = self.getStr("邮箱", txt)
                if "官网： -" in txt:
                    webSite = self.getStr("官网", txt)
                elif "官网" in txt:
                    webSite = self.getPhoneWeb("官网", txt)
                if "地址" in txt:
                    address = self.getStr("地址", txt)

            # The row always has exactly len(self.Header) fields, so the
            # original length guard was tautological and is dropped.
            chapterList.append(pd.Series(
                [companyName, label, legalPerson, registeredCapital,
                 registeredDate, phoneNumber, email, webSite, address],
                index=self.Header))

        return chapterList


if __name__ == '__main__':
    # Any entry from Qcc.provienceList is a valid province argument,
    # e.g. 广东 / 北京 / 江苏 / 上海 / ... / 澳门.
    Qcc("海南", r'D:\develop\chromedriver_win32\chromedriver.exe').Main()