import time
import urllib.parse
from datetime import datetime

import scrapy
from Interior_Design_email.get_company import get_names
from email_spider.items import EmailSpiderItem
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

from email_spider.set.setting import Set


class EmailSpider(scrapy.Spider):
    """Scrape company contact details (email, phone, profile URL) from
    tianyancha.com search results, one request per company name.

    Fields that are missing, and any parse failure, are appended to a
    dated error-log file inside ``self.file_name``.
    """

    name = 'email'

    def __init__(self, **kwargs):
        """Start the Selenium Chrome driver and record log-file settings."""
        super().__init__(**kwargs)
        chrome_options = Options()
        # Optional flags for running without a visible browser window:
        # chrome_options.add_argument('--headless')
        # chrome_options.add_argument('--disable-apu')
        # chrome_options.add_argument('--no-sandbox')
        # NOTE: the `chrome_options` keyword was removed in Selenium 4;
        # `options` is accepted by both Selenium 3.x and 4.x.
        self.driver = webdriver.Chrome(options=chrome_options)
        # Date stamp used in the error-log filename, e.g. "2024-01-31".
        self.data = time.strftime('%Y-%m-%d')

        self.file_name = Set().file_name  # folder holding the data sources to crawl

    def _log_error(self, message):
        """Append *message* to today's error-log file (created on demand)."""
        path = self.file_name + '/spider_error_' + self.data + '.txt'
        with open(path, 'a', encoding='utf-8') as f:
            f.write(message)

    def start_requests(self):
        """Yield one tianyancha search request per company name."""
        companys = get_names()  # list of company names to look up
        for i, company in enumerate(companys, start=1):
            value = urllib.parse.quote(company)
            print('第{}条《{}》spider start ....'.format(i, company))
            url = 'https://www.tianyancha.com/search?key={}'.format(value)
            yield scrapy.Request(url=url, meta={'company': company, 'page_num': i},
                                 callback=self.parse, dont_filter=True)

    def parse(self, response):
        """Extract email / phone / profile URL from the first search hit.

        Yields a populated ``EmailSpiderItem`` on success; missing fields
        and unexpected failures are appended to the daily error log.
        """
        item = EmailSpiderItem()
        company = response.meta['company']
        page_num = response.meta['page_num']
        # XPath roots: the first result card's contact row and its header.
        xpath = '//div[@class="result-list sv-search-container"]/div[1][@class="search-item sv-search-company"]//div[@class="contact row "]'
        xpath2 = '//div[@class="result-list sv-search-container"]/div[1][@class="search-item sv-search-company"]//div[@class="header"]'
        try:
            email = response.xpath('{}/div[2]/span[2]/text()'.format(xpath)).get()  # email address
            tel = response.xpath('{}/div[1]/span[2]/span[1]/text()'.format(xpath)).get()  # phone number
            tianyancha_url = response.xpath('{}/a[1]/@href'.format(xpath2)).get()  # company profile link

            if not email:
                print('{}: email ...'.format(company))
                self._log_error('\n {}---:{}: email...: {}\n'.format(datetime.now(), company, response.url))
            if not tel:
                # BUG FIX: message previously said "tek" instead of "tel".
                print('{}: tel ...'.format(company))
                self._log_error('\n {}---:{}: tel...: {}\n'.format(datetime.now(), company, response.url))

            item['email'] = email
            item['tel'] = tel
            item['company'] = company
            item['tianyancha_url'] = tianyancha_url
            print('第{}条《{}》spider success.'.format(page_num, company))
            yield item
        except Exception as e:
            print('第{}条《{}》spider error...{}'.format(page_num, company, e))
            # Same bytes as the original two writes, combined into one.
            self._log_error('\n {}---:{}: spider error...: {}\n {}\n'.format(
                datetime.now(), company, e, response.url))

    def get_db2(self):
        """Return the whitespace-separated company names from company.txt."""
        with open('company.txt', 'r', encoding='utf-8') as f:
            return f.read().split()

    def close(self, spider, reason):
        """Shut down the Selenium driver when the spider closes."""
        self.driver.quit()
