#!/usr/bin/env python
# -*- coding:utf-8 -*-

import time
import json
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from lxml import etree


# Ask the driver to collect performance log entries.
# NOTE(review): DesiredCapabilities.FIREFOX is a shared class-level dict and
# this assignment aliases (does not copy) it, so the 'loggingPrefs' key is
# added globally for anything else that reads DesiredCapabilities.FIREFOX —
# presumably acceptable for a single-purpose script; confirm if reused.
desired_capabilities = DesiredCapabilities.FIREFOX
desired_capabilities['loggingPrefs'] = {'performance': 'ALL'}


class AutoSpider(object):
    """Selenium-driven scraper for the weiwenjia CRM.

    Logs in, runs the advanced search once per year slice, opens each
    company's masked contact entry to reveal the phone number, snapshots the
    page source to disk, and accumulates parsed results into
    ``company_info_dict`` (also appended to ``jz_info_lxy.csv`` and finally
    dumped to ``result_lxy.json``).
    """

    def __init__(self):
        self.init_browder()
        # Maps company name -> {'start_year', 'phone_count', 'phone_ls'}.
        self.company_info_dict = {}
        # Year label attached to results; mutated inside main()'s loop.
        self.start_year = 2021

    def init_browder(self):
        """Launch Firefox with the webdriver automation fingerprint hidden."""
        options = webdriver.FirefoxOptions()
        # This flag removes the navigator.webdriver automation trace.
        options.add_argument("disable-blink-features=AutomationControlled")
        self.browser = webdriver.Firefox(executable_path='./geckodriver.exe',
                                         options=options,
                                         desired_capabilities=desired_capabilities)
        # Explicit waits raise TimeoutException after 20s.
        self.wai = WebDriverWait(self.browser, 20)

    def login(self, username="16616629296", pwd='LAUGHing96'):
        """Fill in and submit the account/password login form.

        NOTE(review): real credentials are hard-coded as parameter defaults —
        move them to environment variables or a config file before sharing
        this script.
        """
        username_elem = self.wai.until(EC.element_to_be_clickable((By.ID, 'account')))
        pwd_elem = self.wai.until(EC.element_to_be_clickable((By.ID, 'password')))
        submit_elem = self.wai.until(EC.element_to_be_clickable((By.ID, 'loginBtn')))
        checkbox_elem = self.wai.until(EC.element_to_be_clickable(
            (By.XPATH, '//label[contains(@class,"login-clause")]/span[@class="ant-checkbox"]')))
        # Log in with account + password; the checkbox is the terms-of-use box.
        username_elem.send_keys(username)
        pwd_elem.send_keys(pwd)
        checkbox_elem.click()
        time.sleep(1)
        submit_elem.click()
        time.sleep(5)

    def change_search_param(self, left_times=1, right_times=1):
        """Select the custom search template and trigger a search.

        When ``left_times`` is truthy, page the date-picker's left calendar
        back ``left_times`` years and pick the first available day in both
        panes as the date range.

        NOTE(review): ``right_times`` is accepted but never used (the code
        that paged the right calendar is gone) — confirm intent with caller.
        """
        self_tmp_elem = self.wai.until(EC.element_to_be_clickable((By.ID, 'tab-customize')))
        self_tmp_elem.click()
        self_model_elem = self.wai.until(EC.element_to_be_clickable(
            (By.XPATH, '//div[@class="recommend-child-name" and contains(text(),"xxx")]')))
        self_model_elem.click()

        if left_times:
            start_time_elem = self.wai.until(EC.element_to_be_clickable(
                (By.XPATH, '//input[@placeholder="开始日期"]')))
            # end_time_elem is never clicked; the wait itself is kept as a
            # synchronization point (ensures the picker is fully rendered).
            end_time_elem = self.wai.until(EC.element_to_be_clickable(
                (By.XPATH, '//input[@placeholder="结束日期"]')))
            start_time_elem.click()

            # "<<" button: each click moves the left calendar back one year.
            left_last_year_elem = self.wai.until(EC.element_to_be_clickable(
                (By.XPATH, '//button[@class="el-picker-panel__icon-btn el-icon-d-arrow-left"]')))
            for _ in range(left_times):
                left_last_year_elem.click()
            # Pick day 1 in the left pane (range start) and right pane (range end).
            left_day_elem = self.wai.until(EC.element_to_be_clickable(
                (By.XPATH, '//div[contains(@class,"is-left")]//tr[@class="el-date-table__row"]/td[contains(@class,"available")]/div/span[text()=1]')))
            left_day_elem.click()
            right_day_elem = self.wai.until(EC.element_to_be_clickable(
                (By.XPATH, '//div[contains(@class,"is-right")]//tr[@class="el-date-table__row"]/td[contains(@class,"available")]/div/span[text()=1]')))
            right_day_elem.click()
        search_elem = self.wai.until(EC.element_to_be_clickable(
            (By.XPATH, '//div[contains(@class,"search-btn")]')))
        search_elem.click()
        time.sleep(1)

    def wait_company_info_lode(self):
        """Block until the results table is clickable; TimeoutException
        propagates after 20s when the search returned nothing."""
        self.wai.until(EC.element_to_be_clickable((By.XPATH, '//table[@class="el-table__body"]')))
        time.sleep(1)

    def save_company_info(self):
        """Open every masked contact span in the result rows, reveal the phone
        number, snapshot the page source to disk, and parse it."""
        # find_elements(By.XPATH, ...) replaces the deprecated
        # find_elements_by_xpath helper (removed in Selenium >= 4.3);
        # behavior is identical and it also works on Selenium 3.
        tr_elems = self.browser.find_elements(
            By.XPATH,
            '//tr[contains(@class,"el-table__row")]//span[contains(@class,"unShow")]')
        for index, elem in enumerate(tr_elems):
            try:
                time.sleep(1)
                elem.click()
                time.sleep(3)
                phone_elem = self.wai.until(EC.element_to_be_clickable(
                    (By.XPATH, '//div[@name="report-contact_mobile"]')))
                phone_elem.click()
                time.sleep(3)
                file_name = '李霄云/' + str(time.time())
                with open(file_name, 'w', encoding='utf-8') as f:
                    f.write(self.browser.page_source)
                    self.parse_source_code(self.browser.page_source)
            except Exception as exc:
                # Best-effort: skip a row that failed to open/reveal, but do
                # not swallow KeyboardInterrupt/SystemExit the way the old
                # bare `except:` did, and surface the cause for debugging.
                print('observe', exc)

    def parse_source_code(self, text):
        """Parse a saved detail-page snapshot.

        Extracts the company name and every plain 11-digit mobile number with
        its source names/links, appends rows to jz_info_lxy.csv, and merges
        everything into ``self.company_info_dict``.
        """
        print('解析源码')
        selector = etree.HTML(text, parser=etree.HTMLParser(encoding='utf-8'))
        company_name = ''.join(selector.xpath(
            '//div[@class="base-info"]//span[@class="name"]/text()')).strip()
        # BUG FIX: the original checked the literal key 'company_name', which
        # never exists, so every parse re-created the entry and dropped any
        # previously collected phone_ls. Key on the actual company name.
        self.company_info_dict.setdefault(company_name, {'start_year': self.start_year})

        div_selectors = selector.xpath('//div[@role="listitem"]')
        print(div_selectors)
        phone_count = 0
        save_ls = []
        for div_selector in div_selectors:
            phone = ''.join(div_selector.xpath(
                './div//div[@class="panel-item-row"]/span[contains(text(),"手机")]/following-sibling::span/span/text()')).strip()
            # Keep only plain 11-digit mobile numbers (skip masked/landline).
            if '-' in phone or len(phone) != 11:
                continue
            source_name_ls = div_selector.xpath(
                './div//div[@class="panel-item-row"]/span[contains(text(),"来源")]/following-sibling::span/a/text()')
            source_list = div_selector.xpath(
                './div//div[@class="panel-item-row"]/span[contains(text(),"来源")]/following-sibling::div/span/text()')
            source_name_ls.extend(source_list)
            source_href_ls = div_selector.xpath(
                './div//div[@class="panel-item-row"]/span[contains(text(),"来源")]/following-sibling::span/a/@href')
            print(company_name, phone, source_name_ls, source_href_ls)
            print('==============================')
            save_ls.append([source_name_ls, source_href_ls, phone])
            phone_count += 1
            self.company_info_dict[company_name].setdefault('phone_ls', []).append({
                'phone': phone,
                'source_ls': source_name_ls,
                'source_href_ls': source_href_ls
            })
            print(self.company_info_dict[company_name])
            print('记录成功')
        # The entry is guaranteed to exist after the setdefault above, so the
        # original's duplicated create-or-update branch is unnecessary.
        self.company_info_dict[company_name]['phone_count'] = phone_count

        # Open the CSV once instead of re-opening it for every row.
        if save_ls:
            start_year = self.company_info_dict[company_name].get('start_year')
            with open('jz_info_lxy.csv', 'a', encoding='utf-8') as f:
                for ls in save_ls:
                    f.write(f'{company_name},{start_year},{phone_count},{ls[2]},{str(ls[0])},{str(ls[1])}\n')

    def main(self):
        """Top-level flow: log in, run 7 year-sliced searches, then dump the
        accumulated results to result_lxy.json."""
        self.browser.maximize_window()
        self.browser.get('https://uc.weiwenjia.com/web/index.html?appToken=a14cc8b00f84e64b438af540390531e4#/lxyunLogin')
        # Log in.
        self.login()
        time.sleep(7)
        # Enter the advanced-search page once per year slice.
        for i in range(7):
            self.browser.get('https://lxcrm.weiwenjia.com/soukebox/advanced_search')
            time.sleep(5)
            # NOTE(review): subtracting the loop index i (not 1) makes the
            # year label jump 2021, 2020, 2018, 2015, 2011, 2006, 2000 —
            # confirm whether `self.start_year = 2021 - i` was intended.
            self.start_year -= i
            # Change the search criteria for this slice.
            self.change_search_param(left_times=i, right_times=i)
            # Wait for company info to load; skip slices with zero results.
            try:
                self.wait_company_info_lode()
            except Exception:
                print('找到0条企业')
                continue
            self.save_company_info()
            time.sleep(5)
        with open('result_lxy.json', 'w', encoding='utf-8') as f:
            json.dump(self.company_info_dict, f, ensure_ascii=False, indent=4)



if __name__ == '__main__':
    # Run the full scrape: login, iterate year slices, dump results to disk.
    AutoSpider().main()
    # Example of the per-company output shape written to result_lxy.json
    # (this dict is not used anywhere; kept as in-code documentation).
    result = {
        '广州探迹科技有限公司': {
            'start_year': '2021-05-31',
            'phone_count': 4,
            'phone_ls': [
                {
                    'phone': '17709002923',
                    'source_ls': [],
                    'source_href_ls': [],
                },
            ],
        },
    }
