import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from lxml import etree
import csv

class SpiderTianyancha:
    """Scraper for tianyancha.com company-search results.

    For a given search keyword it walks the paginated result list and
    appends one CSV row per company (name, legal person, phone, email,
    registered address) to ``data/<key>.csv``.  Progress and errors are
    reported through a tkinter ``Text``-like widget.
    """

    host = 'https://www.tianyancha.com/'

    # Class-level default headers. NOTE: instances take a private copy in
    # __init__ so that per-instance cookies never leak between instances.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36'
    }

    def __init__(self, key, cookie, text):
        """Initialize the spider.

        Args:
            key: search keyword (also used as the CSV file name).
            cookie: logged-in session cookie string for tianyancha.com.
            text: tkinter Text-like widget used for status output.
        """
        # Requests are made with verify=False; silence the TLS warning once.
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
        self.key = key
        # Copy the class-level headers before adding the cookie.  The
        # original code mutated the shared class attribute, so the cookie
        # of one instance bled into every other instance.
        self.headers = dict(SpiderTianyancha.headers)
        self.headers['Cookie'] = cookie
        self.text = text

    def get_total_page(self):
        """Return the total number of result pages for ``self.key``.

        Returns 0 when the pager cannot be found (captcha / logged-out
        page) or the HTTP request fails; details are written to the
        status widget in both cases.
        """
        search_url = self.host + 'search/ohm1-he1/p1'
        reps = requests.get(search_url, params={'key': self.key}, headers=self.headers, verify=False)
        if reps.status_code != 200:
            self.update_text_info('发生错误：' + str(reps.status_code))
            # Consistent failure sentinel (the original fell through and
            # returned None here, crashing range()-style callers).
            return 0
        html = etree.HTML(reps.text)
        # The second-to-last pager <li> holds the last page number.
        total_page = html.xpath('.//div[contains(@class,"search-pager")]//li[last()-1]')
        if not total_page:
            self.parse_error_page(html)
            return 0
        # Page numbers may be rendered with dots (e.g. "...5"); strip them.
        return int(total_page[0].xpath('string()').replace('.', ''))

    def write_info(self, page):
        """Scrape result page *page* and append its rows to the CSV file.

        Returns True on success, False when the request fails or the page
        contains no result items (captcha / logged-out page).
        """
        search_url = self.host + 'search/ohm1-he1/p' + str(page)
        reps = requests.get(search_url, params={'key': self.key}, headers=self.headers, verify=False)
        if reps.status_code != 200:
            self.update_text_info('发生错误：' + str(reps.status_code))
            return False

        html = etree.HTML(reps.text)
        search_items = html.xpath('.//div[@class="result-list sv-search-container"]/div')
        if not search_items:
            self.parse_error_page(html)
            return False

        # Context manager guarantees the file is closed even if a row
        # raises mid-loop (the original leaked the handle on errors).
        with open('data/' + self.key + '.csv', 'a', encoding='utf-8', newline='') as f:
            csv_writer = csv.writer(f)
            for item in search_items:
                title_ele = item.xpath('.//a[starts-with(@class, "name")]')[0]
                title = title_ele.xpath('string()')
                url = title_ele.xpath('@href')[0]

                legal_person_ele = item.xpath('.//a[contains(@class, "legalPersonName")]')
                if legal_person_ele:
                    legal_person = legal_person_ele[0].xpath('string()')
                else:
                    legal_person = '未公开'

                phone = self._contact_field(item, 1)
                email = self._contact_field(item, 2)
                address = self.get_address(url)

                csv_writer.writerow([title, legal_person, phone, email, address])
        return True

    def _contact_field(self, item, col):
        """Extract the text of contact column *col* (1=phone, 2=email).

        Strips the '查看更多' ("see more") placeholder; returns '未公开'
        ("not public") when the node is absent, mirroring the legal-person
        fallback (the original indexed [0] unconditionally and raised
        IndexError on listings without a contact row).
        """
        nodes = item.xpath('.//div[contains(@class, "contact")]/div[@class="col"][' + str(col) + ']/span[2]')
        if not nodes:
            return '未公开'
        return nodes[0].xpath('string()').replace('查看更多', '')

    def get_address(self, url):
        """Fetch a company detail page and return its registered address.

        Returns '未公开' when the address element is missing and '发生错误'
        when the HTTP request fails.
        """
        reps = requests.get(url, headers=self.headers, verify=False)
        if reps.status_code != 200:
            self.update_text_info('发生错误：' + str(reps.status_code))
            return '发生错误'
        html = etree.HTML(reps.text)
        address_ele = html.xpath('.//div[@class="in-block sup-ie-company-header-child-2"]/div[@class="auto-folder"]/div')
        if address_ele:
            return address_ele[0].xpath('string()')
        return '未公开'

    def parse_error_page(self, html):
        """Report why a result page had no items: captcha text if present,
        otherwise assume the session cookie has expired."""
        tips = html.xpath('.//div[@class="captcha-title"]')
        if tips:
            self.update_text_info(tips[0].xpath('string()'))
        else:
            self.update_text_info('登录失效，请检查当前登录状态')

    def update_text_info(self, text):
        """Append *text* to the status widget and scroll it into view."""
        self.text.insert('end', text)
        self.text.see('end')
        self.text.update()
