import requests
import re
from bs4 import BeautifulSoup
from lxml import etree
from retry import retry
import DateUtil, Sleep, UAPool
from functools import reduce


class Spider:
    """Scrape part-time job data from jianzhimao.com.

    Every public method returns useful data on success and ``False`` on
    failure (the site signals "no data" with an HTTP 500 response).
    """

    # @staticmethod lets the fetchers run without self/cls;
    # @retry re-runs a fetcher up to 3 times with a 1-3s delay on exceptions.
    @staticmethod
    @retry(tries=3, delay=1, max_delay=3)
    def __get_index_page():
        """Request the city-selection index page and return the response."""
        Sleep.sleep()  # throttle so we do not hammer the server
        url = 'https://www.jianzhimao.com/ctrlcity/changeCity.html'
        resp = requests.get(url=url, headers=UAPool.get_headers())
        resp.encoding = 'utf-8'
        return resp

    def get_city_data(self):
        """Return a {city_name: city_href} dict, or False on a 500 response."""
        resp = self.__get_index_page()
        # HTTP 500 marks an unusable page on this site.
        if resp.status_code == 500:
            return False
        tree = etree.HTML(resp.text)
        # Collect every city name together with its link.
        all_city_name = tree.xpath("//ul[@class='city_table']/li/a/text()")
        all_city_href = tree.xpath("//ul[@class='city_table']/li/a/@href")
        return dict(zip(all_city_name, all_city_href))

    @staticmethod
    @retry(tries=3, delay=1, max_delay=3)
    def __get_city_page(city_url):
        """Request one city's landing page and return the response."""
        Sleep.sleep()  # throttle so we do not hammer the server
        headers = UAPool.get_headers()
        headers.update({'Connection': 'close'})
        resp = requests.get(url=city_url, headers=headers)
        resp.encoding = 'utf-8'
        return resp

    def get_area_data(self, city_url):
        """Return a {area_name: absolute_area_url} dict for one city.

        Returns False on a 500 response; the caller uses that to mark the
        city's status as -1 (no data).
        """
        resp = self.__get_city_page(city_url)
        if resp.status_code == 500:
            return False
        tree = etree.HTML(resp.text)
        # Area links live in the third <li> of the filter box; the first <a>
        # is the "all areas" link and is skipped.
        all_area_name = tree.xpath("//ul[@class='box']/li[3]/a[position() > 1]/text()")
        all_area_href = tree.xpath("//ul[@class='box']/li[3]/a[position() > 1]/@href")
        # Strip whitespace embedded in the relative links (no-op on clean ones).
        all_area_href = [re.sub(r'\s', '', href) for href in all_area_href]
        # Prefix each relative link with the city's scheme+host,
        # e.g. 'http://dongguan.jianzhimao.com'.
        city_href = re.findall(r'.*?\.com', city_url)[0]
        all_area_href = [city_href + href for href in all_area_href]
        return dict(zip(all_area_name, all_area_href))

    @staticmethod
    @retry(tries=3, delay=1, max_delay=3)
    def __get_area_page(area_url):
        """Request one area listing page and return the response."""
        Sleep.sleep()  # throttle so we do not hammer the server
        resp = requests.get(url=area_url, headers=UAPool.get_headers())
        resp.encoding = 'utf-8'
        return resp

    @staticmethod
    def __get_area_url_list(area_url, page_text):
        """Return the absolute URLs of every result page of an area listing."""
        tree = etree.HTML(page_text)
        all_href_list = tree.xpath("//ul[@id='content_page_wrap']/li//@href")
        # The pagination bar repeats the "next page" link; drop duplicates
        # while keeping order (dict preserves insertion order on Python 3.7+).
        if len(all_href_list) > 1:
            all_href_list = list(dict.fromkeys(all_href_list))
        # Prefix every relative page link with the area's scheme+host.
        city_href = re.findall(r'.*?\.com', area_url)[0]
        return [city_href + href for href in all_href_list]

    def get_job_data(self, area_url):
        """Collect every job posting in an area across all result pages.

        Returns a list of {'title', 'url', 'pv', 'date'} dicts, or False when
        the area (or any of its pages) answers 500 or holds no postings.
        """
        resp = self.__get_area_page(area_url)
        # Fixed: the original fell off the end (returning None) here instead
        # of returning False like every other failure path.
        if resp.status_code == 500:
            return False
        # The URL prefix is loop-invariant, so compute it once.
        city_href = re.findall(r'.*?\.com', area_url)[0]
        job_data = []
        for page_url in self.__get_area_url_list(area_url, resp.text):
            page = self.__get_area_page(page_url)
            if page.status_code == 500:
                return False
            tree = etree.HTML(page.text)
            # Titles and links of every posting on this page.
            all_job_title = tree.xpath("//ul[@id='content_list_wrap']//a/@title")
            all_job_href = [city_href + href
                            for href in tree.xpath("//ul[@id='content_list_wrap']//a/@href")]
            # Visit counts and publish dates.
            all_job_pv = tree.xpath("//div[@class='left visited']//@title")
            all_job_date = tree.xpath("//div[@class='left date']//@title")
            # Convert fuzzy relative timestamps into concrete dates.
            all_job_time = [DateUtil.str_date(d) for d in all_job_date]
            job_data.extend(zip(all_job_title, all_job_href, all_job_pv, all_job_time))
        # An empty result means the area exists but carries no postings.
        if not job_data:
            return False
        return [{'title': title, 'url': url, 'pv': pv, 'date': date}
                for title, url, pv, date in job_data]

    @staticmethod
    @retry(tries=3, delay=1, max_delay=3)
    def __get_job_page(job_url):
        """Request one posting's detail page and return the response."""
        Sleep.sleep()  # added for consistency with the other fetchers
        resp = requests.get(url=job_url, headers=UAPool.get_headers())
        resp.encoding = 'utf-8'
        return resp

    def get_job_info(self, job_url):
        """Scrape one posting's detail page.

        Returns a list containing one dict with keys type / headcount /
        address / wage / detail / com_name / com_profile / com_address,
        or False when the page answers 500, lacks the expected markup,
        or yields no data.
        """
        resp = self.__get_job_page(job_url)
        if resp.status_code == 500:
            return False
        try:
            tree = etree.HTML(resp.text)
            soup = BeautifulSoup(resp.text, 'lxml')
            job_type = tree.xpath("//a[@class='job_type']/text()")
            job_headcount = tree.xpath("//ul[@class='job_list'][1]/li[1]/span[@class='con']/text()")
            job_address = tree.xpath("//ul[@class='job_list'][1]/li[2]/span[@class='con']/text()")
            job_wage = tree.xpath("//ul[@class='job_list'][3]/li[2]/span[@class='con']/text()")
            # soup.find(...) returns None when the layout changed; the
            # resulting AttributeError is handled like any parse failure.
            job_detail = [soup.find('div', id='job_detail').text]
            com_name = tree.xpath("//a[@class='com_name']/text()")
            com_profile = tree.xpath("//div[@class='company_info']/p[1]/text()")
            com_address = tree.xpath("//div[@class='company_info']/p[2]/text()")
            rows = list(zip(job_type, job_headcount, job_address, job_wage,
                            job_detail, com_name, com_profile, com_address))
        except Exception:
            # Deliberately broad for scrape robustness, but no longer a bare
            # except: KeyboardInterrupt/SystemExit now propagate, and the
            # failure contract (falsy return) matches the other methods.
            return False
        # zip() yields nothing when any column is missing; the original's
        # 'if job_info:' check was always true, so an empty scrape leaked
        # out as [] — return False explicitly instead.
        if not rows:
            return False
        return [{'type': r[0], 'headcount': r[1], 'address': r[2], 'wage': r[3],
                 'detail': r[4], 'com_name': r[5], 'com_profile': r[6],
                 'com_address': r[7]} for r in rows]

if __name__ == '__main__':
    # Ad-hoc smoke test: pull one district's job listings and dump them.
    spider = Spider()
    job_list = spider.get_job_data('http://dongguan.jianzhimao.com/nancheng_zbx_0/')
    print(job_list)
