import csv
import functools
import json
import os
import random
import re
from pathlib import Path
from time import sleep

import pandas as pd
from lxml import etree
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait

def extract_first(data):
    """Return the first element of *data*, or '' when the sequence is empty.

    xpath() returns a (possibly empty) list; indexing an empty result would
    raise IndexError, hence the guard.
    """
    return data[0] if data else ""
def print_save_txt(content):
    """Echo *content* to stdout and append it to the progress log file."""
    print(content)
    log = open('process_recoder.txt', 'a', encoding='utf-8')
    try:
        log.write(content)
        log.flush()
    finally:
        log.close()
def try_except(func):
    """Decorator for scraper methods: swallow any exception, print it, and
    return None so one bad page does not abort the whole crawl.

    FIX: apply functools.wraps so the wrapped method keeps its __name__ and
    docstring (the original wrapper hid them behind 'inner').
    """
    @functools.wraps(func)
    def inner(self, response, *args, **kwargs):
        try:
            return func(self, response, *args, **kwargs)
        except Exception as e:
            # Deliberate best-effort: log and continue rather than crash.
            print("Got error:{}".format(e))
            return None
    return inner
#不存在文件夹就创建
def create_folder(folder_path):
    """Create *folder_path* if it does not already exist.

    FIX: the original used os.mkdir after an is_dir() check, which fails for
    nested paths and is racy. mkdir(parents=True, exist_ok=True) handles
    both cases atomically enough for this use.
    """
    Path(folder_path).mkdir(parents=True, exist_ok=True)

class data_tidy:
    """Load a csv/xlsx file into a pandas DataFrame and offer common
    cleanup operations (dedup, NaN removal, forbidden-character scrub)."""

    def __init__(self, filename, names=None, encoding='utf-8'):
        suffix = filename[filename.rfind('.') + 1:]
        # When explicit column names are supplied the file is assumed to
        # have no header row; otherwise row 0 is the header.
        header = None if names is not None else 0
        if suffix == 'csv':
            # BUG FIX: the original passed the literal string 'filename'
            # instead of the `filename` argument, so it never loaded the
            # caller's file.
            self.data = pd.read_csv(filename, header=header, names=names, encoding=encoding)
        elif suffix == 'xlsx':
            # Same literal-string bug fixed here; `encoding` is dropped
            # because pandas.read_excel no longer accepts that keyword.
            self.data = pd.read_excel(filename, header=header, names=names)

    def drop_duplicates(self, inplace=True):
        """Drop duplicated rows (returns the new frame when inplace=False,
        None otherwise — standard pandas convention)."""
        df = self.data.drop_duplicates(inplace=inplace)
        return df

    def drop_all_na(self, inplace=True):
        """Drop rows whose cells are all NaN."""
        self.data.dropna(how='all', inplace=inplace)

    def drop_duplicates_nan(self, inplace=True):
        """Drop duplicates then all-NaN rows in one call."""
        self.drop_duplicates(inplace=inplace)
        self.drop_all_na(inplace=inplace)

    def columne_drop_na(self, columne_list, inplace=True):
        """Drop rows that have NaN in any column of *columne_list*."""
        df = self.data.dropna(axis=0, subset=columne_list, inplace=inplace)
        return df

    def columne_split(self, columne, split, split_name_list):
        """Split one column into several by a separator — not implemented."""
        pass

    def add_columne(self):
        """Append a column — not implemented."""
        pass

    def isin_data(self, find_map):
        """Membership-check placeholder; always True for now."""
        return True

    def drop_win_forbid(self, columne):
        """Replace characters that are forbidden in Windows filenames (plus
        some CJK punctuation) with '_' throughout column *columne*."""
        def drop_forbid(x):
            p = re.compile(r'[-,$()#+&*？/，\\。“”?、·~"\*<>|]')
            s = re.split(p, x)
            return '_'.join(s)
        self.data[columne] = self.data[columne].map(drop_forbid)
        return self.data

class SaveTxt:
    """Plain-text writer bound to one file (append mode by default)."""

    def __init__(self, fileneme, mod='a', encoding='utf-8'):
        # Parameter name kept as-is (typo and all) for keyword callers.
        self.filename = fileneme
        self.mod = mod
        self.encoding = encoding

    def save_txt(self, content):
        """Open the configured file, write *content*, flush and close."""
        handle = open(self.filename, self.mod, encoding=self.encoding)
        try:
            handle.write(content)
            handle.flush()
        finally:
            handle.close()

class SaveCsv:
    """Row-by-row csv appender (utf-8-sig default so Excel opens the file
    with the right encoding)."""

    def __init__(self, filename, mod='a', encoding='utf-8-sig'):
        self.filename = filename
        self.mod = mod
        self.encoding = encoding

    def save_csv(self, write):
        """Append one row (*write* is an iterable of cell values)."""
        with open(self.filename, self.mod, encoding=self.encoding, newline='') as handle:
            csv.writer(handle).writerow(write)

class SavePicture:
    """Save screenshots of page elements into *folder* (created on demand)."""

    def __init__(self, folder):
        self.folder = folder
        # FIX: exist_ok avoids the check-then-create race of the original,
        # and parents=True lets nested folder paths work.
        Path(self.folder).mkdir(parents=True, exist_ok=True)

    def save_pic(self, xpath, filename, driver):
        """Screenshot the element at *xpath* and write it to folder/filename.

        BUG FIX: selenium exposes `screenshot_as_png`, not
        `screenshot_as_jpg` — which is why the original author noted this
        method "could not be used". Also use Path joining instead of a
        hard-coded '\\' separator so it works off Windows too.
        """
        target = Path(self.folder) / filename
        with open(target, 'wb') as file:
            file.write(driver.find_element_by_xpath(xpath).screenshot_as_png)

class SaveSQL:
    # Placeholder for a future database-backed writer; not implemented yet.
    pass

class LagouSelenium:
    """Selenium crawler for lagou.com job listings.

    For *key_word* (optionally restricted to a Beijing *district*) it walks
    the search-result list, opens every position's detail page, and scrapes
    each company's profile once, appending everything to csv files.
    """

    def __init__(self, key_word, district=None):
        self.key_word = key_word

        # Search entry points: site root, nationwide, Beijing, Beijing-by-district.
        self.base_url = 'https://www.lagou.com/'
        self.nation_search_url = 'https://www.lagou.com/jobs/list_{}?labelWords=&fromSearch=true&suginput='
        self.bj_search_url = 'https://www.lagou.com/jobs/list_{}/p-city_2?&cl=false&fromSearch=true&labelWords=&suginput='
        self.bj_district_url = 'https://www.lagou.com/jobs/list_{}/p-city_2?px=default&district={}#filterBox'
        self.district = district
        self.driver = webdriver.Chrome()

        # One csv sink per record kind.
        self.position_info_csv = SaveCsv('position_info.csv')
        self.position_detail_csv = SaveCsv('position_detail.csv')
        self.company_csv = SaveCsv('company_info.csv')
        self.company_position = SaveCsv('company_positions.csv')
        self.save_img = SavePicture('hr_head_img')
        self.count = 1  # current search-result page number

        self.already_company = []  # companies already scraped, to avoid repeats

    def get_cookies(self):
        """Open the site, wait for a manual login, then dump cookies to disk."""
        self.driver.get(self.base_url)
        sleep(60)  # time for the user to log in by hand
        cookies = self.driver.get_cookies()

        with open("lagou_cookies.txt", "w", encoding="utf-8") as file:
            file.write(json.dumps(cookies))
            file.flush()

        self.driver.close()

    def cookie_login(self, area='beijin_d'):
        """Log in by replaying the cookies saved by get_cookies(), then open
        the search url selected by *area*."""
        if area == 'beijin':
            url = self.bj_search_url.format(self.key_word)
        elif area == 'beijin_d':
            url = self.bj_district_url.format(self.key_word, self.district)
        else:
            url = self.nation_search_url.format(self.key_word)

        self.driver.get(self.base_url)
        self.driver.delete_all_cookies()

        with open('lagou_cookies.txt', 'r', encoding='utf-8') as file:
            cookies_str = file.read()
        cookies = json.loads(cookies_str)

        for cookie in cookies:
            self.driver.add_cookie(cookie)
        self.driver.refresh()
        sleep(random.uniform(4, 7))
        self.driver.get(url)
        sleep(random.uniform(4, 7))

    def job_list_save(self, tree):
        """Extract every job row from a search-result page *tree* and append
        each one to position_info.csv."""
        job_list = tree.xpath('//div[contains(@class, "s_position_list")]/ul/li')
        for job in job_list:
            job_dict = {}
            job_dict['title'] = extract_first(job.xpath('./div/div/div/a/h3/text()'))
            job_dict['area'] = extract_first(job.xpath('./div/div/div/a/span/em/text()'))
            job_dict['time'] = extract_first(job.xpath('./div/div/div/span/text()'))
            job_dict['money'] = extract_first(job.xpath('./div/div/div/div/span[@class="money"]/text()'))
            experience_education = ''.join(job.xpath('./div/div/div/div[@class="li_b_l"]/text()')).replace('\n', '').replace(' ', '')
            # ROBUSTNESS FIX: the original indexed [1] unconditionally and
            # crashed with IndexError on rows without a '/' separator.
            parts = experience_education.split('/')
            job_dict['experience'] = parts[0]
            job_dict['education'] = parts[1] if len(parts) > 1 else ''
            job_dict['company'] = extract_first(job.xpath('./div/div[@class="company"]/div/a/text()'))
            job_dict['company_info'] = extract_first(job.xpath('./div/div/div[@class="industry"]/text()'))
            # Several label spans may exist, so join them instead of taking
            # only the first.
            job_dict['position_lable'] = '/'.join(job.xpath('./div[@class="list_item_bot"]/div[@class="li_b_l"]/span/text()'))
            job_dict['goodness'] = extract_first(job.xpath('./div[@class="list_item_bot"]/div[@class="li_b_r"]/text()'))

            info_list = list(job_dict.values())
            info_list.insert(0, self.key_word)
            self.position_info_csv.save_csv(info_list)  # persist the row

    def job_detail_save(self, detail_tree):
        """Scrape one position-detail page *detail_tree* into
        position_detail.csv, then scrape the company profile if this company
        has not been seen before."""
        job_detail_dict = {}
        job_detail_dict['title'] = extract_first(detail_tree.xpath('//h1[@class="name"]/span/span/span[contains(@class,"position")]/text()'))
        job_detail_dict['money'] = extract_first(detail_tree.xpath('//h1[@class="name"]/span/span/span[@class="salary"]/text()'))
        job_detail_dict['city'] = extract_first(detail_tree.xpath('//dd/h3/span[1]/text()')).replace('/', '').replace(' ', '')
        job_detail_dict['experience'] = extract_first(detail_tree.xpath('//dd/h3/span[2]/text()')).replace('/', '').replace(' ', '')
        job_detail_dict['education'] = extract_first(detail_tree.xpath('//dd/h3/span[3]/text()')).replace('/', '').replace(' ', '')
        job_detail_dict['fullorpart'] = extract_first(detail_tree.xpath('//dd/h3/span[4]/text()')).replace('/', '').replace(' ', '')
        job_detail_dict['job_type'] = extract_first(detail_tree.xpath('//dd/h3/div/span[@class="text"]/text()'))
        job_detail_dict['position_lable'] = '/'.join(detail_tree.xpath('//dd/ul[contains(@class,"position")]/li/text()'))
        job_detail_dict['position_detail'] = ''.join(detail_tree.xpath('//div/div/dl/dd[2]/div/text()'))
        job_detail_dict['emplyer'] = extract_first(detail_tree.xpath('//dd/p/span/text()'))
        job_detail_dict['publish_time'] = ''.join(detail_tree.xpath('//dd/p[@class="publish_time"]/text()')).replace('\n', '').replace(' ', '')
        job_detail_dict['advantage'] = extract_first(detail_tree.xpath('//div/div/dl/dd/p/text()'))
        job_detail_dict['company'] = ''.join(detail_tree.xpath('//div/h3/em[@class="fl-cn"]/text()')).replace('\n', '').replace(' ', '')
        job_detail_dict['industry'] = extract_first(detail_tree.xpath('//dd/ul/li[1]/h4/text()'))
        job_detail_dict['develop'] = extract_first(detail_tree.xpath('//dd/ul/li[2]/h4/text()'))
        job_detail_dict['employee'] = extract_first(detail_tree.xpath('//dd/ul/li[3]/h4/text()'))
        job_detail_dict['home_page'] = extract_first(detail_tree.xpath('//dd/ul/li[4]/a/h4/text()'))
        job_detail_dict['work_city'] = extract_first(detail_tree.xpath('//div/div/dl/dd[3]/div/a[1]/text()'))
        job_detail_dict['district'] = extract_first(detail_tree.xpath('//div/div/dl/dd[3]/div/a[2]/text()'))
        job_detail_dict['street'] = extract_first(detail_tree.xpath('//div/div/dl/dd[3]/div/a[3]/text()'))
        job_detail_dict['address'] = ''.join(detail_tree.xpath('//div/div/dl/dd[3]/div/text()')).replace('\n', '').replace('-', '').replace(' ', '')
        job_detail_dict['hr'] = extract_first(detail_tree.xpath('//dd/div/div/a/span/text()'))
        job_detail_dict['hr_title'] = extract_first(detail_tree.xpath('//dd/div/div/span[@class="pos"]/text()'))

        detail_list = list(job_detail_dict.values())
        detail_list.insert(0, self.key_word)
        self.position_detail_csv.save_csv(detail_list)  # persist the row

        sleep(random.uniform(2, 3))

        company = job_detail_dict.get('company')
        if company not in self.already_company:
            # Click through to the company page, scrape it, close the tab.
            self.click_switch_save_close('//div/dl/dt/a/img', self.company_save)

    def company_save(self, company_tree):
        """Scrape one company-profile page *company_tree* into
        company_info.csv and remember the company as already visited."""
        company_dict = {}
        company_dict['company'] = extract_first(company_tree.xpath('//div/div/h1/a/@title'))
        company_dict['position_num'] = ''.join(company_tree.xpath('//div/ul/li[1]/strong/text()')).replace('/', '').replace(' ', '')
        company_dict['in_time'] = ''.join(company_tree.xpath('//div/ul/li[2]/strong/text()')).replace('/', '').replace(' ', '')
        company_dict['read_time'] = ''.join(company_tree.xpath('//div/ul/li[3]/strong/text()')).replace('/', '').replace(' ', '')
        company_dict['industry'] = extract_first(company_tree.xpath('//div[@class="item_content"]/ul/li[1]/span/text()'))
        company_dict['develop'] = extract_first(company_tree.xpath('//div[@class="item_content"]/ul/li[2]/span/text()'))
        company_dict['employee'] = extract_first(company_tree.xpath('//div[@class="item_content"]/ul/li[3]/span/text()'))
        company_dict['city'] = extract_first(company_tree.xpath('//div[@class="item_content"]/ul/li[4]/span/text()'))
        company_dict['register_name'] = extract_first(company_tree.xpath('//div/div/div[@class="info_item"][1]/div[@class="content"]/text()'))
        company_dict['register_time'] = extract_first(company_tree.xpath('//div/div/div[@class="info_item"][2]/div[@class="content"]/text()'))
        company_dict['register_money'] = extract_first(company_tree.xpath('//div/div/div[@class="info_item"][3]/div[@class="content"]/text()'))
        company_dict['register_man'] = extract_first(company_tree.xpath('//div/div/div[@class="info_item"][4]/div[@class="content"]/text()'))
        company_dict['company_lable'] = '/'.join(company_tree.xpath('//div/div/ul/li[@class="con_ul_li"]/text()')).replace('\n', '').replace(' ', '')

        company_list = list(company_dict.values())
        self.company_csv.save_csv(company_list)
        self.already_company.append(company_dict['company'])

    def company_position_save(self, company):
        """Scrape every open position listed on a company page, walking the
        eight job-category tabs and their pagination."""
        def replace(s):
            # Strip spaces and newlines from a scraped string.
            return s.replace(' ', '').replace('\n', '')

        def get_jobs(i, job_list_tree):
            # Extract and persist each listed job of category tab *i*.
            job_type = extract_first(job_list_tree.xpath('//div/div[contains(@class,"filter")]/ul/li[{:d}]/text()'.format(i)))
            position = job_list_tree.xpath('//div/div[2]/div[2]/ul/li/@data-positionname')
            salary = job_list_tree.xpath('//li/div/span[contains(@class,"salary")]/text()')
            require = job_list_tree.xpath('//li/p/span[@class="item_desc"]/text()')
            time = job_list_tree.xpath('//li/p/span[@class="item_date"]/text()')

            if position:  # only if there are jobs in this category
                for a in range(len(position)):
                    job = [company, job_type, replace(position[a]), replace(salary[a]), replace(require[a]), replace(time[a])]
                    self.company_position.save_csv(job)

        def get_next_tree(xpath):
            # Click the "next page" control and parse the resulting page.
            next_btn = self.driver.find_element_by_xpath(xpath)
            webdriver.ActionChains(self.driver).move_to_element(next_btn).click(next_btn).perform()
            self.driver.implicitly_wait(random.uniform(3, 5))
            return etree.HTML(self.driver.page_source)

        # Tabs 2-9 are the per-category job lists.
        for i in range(2, 10):
            job_list_tree = self.click_get_tree('//div/div/div/ul[@class="con_filter_ul"]/li[{:d}]'.format(i))
            get_jobs(i, job_list_tree)
            next_mark = extract_first(job_list_tree.xpath('//div/div/div/div/span[@class="next"]/text()'))
            while next_mark == '下一页':  # keep paging while a next page exists
                next_page_tree = get_next_tree('//div/div/div/div/span[@class="next"]')
                get_jobs(i, next_page_tree)
                # BUG FIX: refresh the marker from the newly loaded page;
                # the original never updated it, so this loop could not
                # terminate once entered.
                next_mark = extract_first(next_page_tree.xpath('//div/div/div/div/span[@class="next"]/text()'))
                sleep(random.uniform(3, 4))
            sleep(random.uniform(2, 5))

    def click_get_tree(self, click_xpath):
        """Click the element at *click_xpath* and return the resulting page
        parsed as an lxml tree."""
        click_button = self.driver.find_element_by_xpath(click_xpath)
        click_button.click()
        self.driver.implicitly_wait(5)
        return etree.HTML(self.driver.page_source)

    def click_save_back(self, click_xpath, func):
        """Click, hand the new page's tree to *func* for saving, then
        navigate back."""
        click_tree = self.click_get_tree(click_xpath)
        func(click_tree)
        self.driver.back()

    def click_switch_get_tree(self, click_xpath):
        """Click an element that opens a new window, switch to that window,
        and return its parsed tree."""
        click_button = self.driver.find_element_by_xpath(click_xpath)
        click_button.click()
        self.switch_new_window()
        self.driver.implicitly_wait(5)
        return etree.HTML(self.driver.page_source)

    def click_switch_save_close(self, click_xpath, func):
        """Click into a new window, hand its tree to *func* for saving, then
        close that window."""
        click_tree = self.click_switch_get_tree(click_xpath)
        func(click_tree)
        self.driver.close()

    def next_page(self, tree):
        """If the result list has a next page, click it and recurse into
        get_job(); otherwise reset the page counter.

        NOTE(review): this recurses once per page, so very deep result sets
        could approach Python's recursion limit.
        """
        next_mark = extract_first(tree.xpath('//span[@action="next"]/@class'))
        if next_mark.find("pager_next_disabled") == -1:
            next_btn = self.driver.find_element_by_xpath('//span[@action="next"]')
            webdriver.ActionChains(self.driver).move_to_element(next_btn).click(next_btn).perform()
            self.count += 1  # track the page we are about to crawl
            sleep(random.uniform(5, 10))
            self.get_job()  # continue with the next page
        else:
            self.count = 0

    def switch_new_window(self):
        """Switch the driver to the most recently opened window."""
        wins = self.driver.window_handles
        self.driver.switch_to.window(wins[-1])

    def switch_main_window(self):
        """Switch the driver back to the original window."""
        wins = self.driver.window_handles
        self.driver.switch_to.window(wins[0])

    def get_job(self):
        """Scrape the current result page: save the list rows, open every
        position's detail page in turn, then follow pagination."""
        print_save_txt('1-关键词：{}，开始爬取第{}页\n'.format(self.key_word, str(self.count)))
        tree = etree.HTML(self.driver.page_source)
        self.job_list_save(tree)  # extract and store the list rows

        # find_elements (plural!) returns every clickable job title.
        job_detail_buttons = self.driver.find_elements_by_xpath('//div/div/div/a/h3')
        # Click each one, scrape the detail tab, close it, come back.
        job_count = 1
        for job_detail in job_detail_buttons:
            print_save_txt('——爬取第{}页,第 {} 条职位\n'.format(str(self.count), str(job_count)))
            try:
                job_detail.click()
            except:
                # Fall back to ActionChains when a plain click is intercepted.
                webdriver.ActionChains(self.driver).move_to_element(job_detail).click(job_detail).perform()
            self.switch_new_window()  # focus the freshly opened tab
            self.driver.implicitly_wait(5)
            sleep(random.uniform(1, 2))
            detail_tree = etree.HTML(self.driver.page_source)  # parse the detail page
            self.job_detail_save(detail_tree)  # extract and store its content
            self.switch_new_window()
            self.driver.close()  # close the detail tab
            self.switch_main_window()  # back to the result list
            job_count += 1

        # Follow pagination if a next page exists.
        self.next_page(tree)


if __name__ == '__main__':

    # All Beijing districts that could be crawled (only one is used below).
    area_list = ['海淀区', '朝阳区', '东城区', '丰台区', '昌平区', '大兴区', '西城区', '通州区', '顺义区', '石景山区',
                 '房山区', '门头沟区', '怀柔区', '延庆区', '密云区', '平谷区']

    lagou = LagouSelenium('数据分析', '朝阳区')
    # lagou.get_cookies()  # run once, manually, to capture a logged-in session
    lagou.cookie_login()
    lagou.get_job()






