import requests
from bs4 import BeautifulSoup
from multiprocessing import Process, Queue, Lock
import time
import random
from fake_useragent import UserAgent

# Liepin region ("dq") query codes mapped to the city they stand for.
# The code is sent as the 'dq' query parameter of the search URL; the
# city name is only used in log lines and output-file headers.
dqs = {
    # '410': '全国',
    '010': '北京',
    '020': '上海',
    '030': '天津',
    '040': '重庆',
    '050020': '广州',
    '050090': '深圳',
    '060080': '苏州',
    '060020': '南京',
    '070020': '杭州',
    '210040': '大连',
    '280020': '成都',
    '170020': '武汉',
    '270020': '西安'
}
# User_Agent = [
#     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.60 Safari/537.36 Edg/100.0.1185.29',
#     'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
#     'User-Agent:Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11'
# ]
# Random User-Agent source; every outgoing request picks a fresh ua.random.
ua = UserAgent()


# 把url中的参数去掉
def url_clip(url: str):
    """Return *url* with the query string (everything from the first '?') removed."""
    base, _sep, _query = url.partition('?')
    return base


def is_banned(soup):
    """Return True when the fetched page is Liepin's anti-bot block page.

    Detection: the block page's <title> reads "猎聘安全中心".
    """
    titles = soup.select("title")
    return bool(titles) and titles[0].text == "猎聘安全中心"


def read_proxy_queue(q: Queue, need_sleep=False):
    """Return the next proxy from the shared queue *q*.

    With need_sleep=False this simply blocks on q.get(). With
    need_sleep=True it polls instead, napping a random 0-5 s while the
    queue is empty, then takes an item.
    """
    if not need_sleep:
        return q.get()
    while q.empty():
        print("waiting for proxy")
        time.sleep(random.random() * 5)
    return q.get()


#爬取所有职位详情页的链接
# Crawl every result-list page for one (keyword, region) search.
def get_all_job_link(start_url,
                     params=None,
                     filename=None,
                     job_url_queue: Queue = None,
                     proxy_queue: Queue = None,
                     proxy=None):
    """Walk the paginated search results and collect job-detail URLs.

    Parameters:
        start_url: search endpoint URL.
        params: query-parameter dict; must contain 'key' (search keyword)
            and 'dq' (region code). 'currentPage' is written into it here.
        filename: when given, collected URLs are appended to this file
            once the crawl for this search finishes.
        job_url_queue: when given, each (key, url) pair is pushed for the
            detail spiders as soon as it is found.
        proxy_queue: source of replacement proxies when requests time out.
        proxy: requests-style proxies mapping, or None for a direct connection.

    Returns:
        The set of de-parameterized job-detail URLs that were found.
    """
    currentPage = 0
    all_url = set()

    if proxy_queue is not None and proxy is None:
        proxy = read_proxy_queue(proxy_queue)

    while True:
        soup = None  # set only when a non-banned page was fetched
        for try_time in range(1, 6):
            headers = {'User-Agent': ua.random}
            params['currentPage'] = str(currentPage)

            try:
                page = requests.get(url=start_url,
                                    headers=headers,
                                    params=params,
                                    proxies=proxy,
                                    timeout=7)
            except (requests.ReadTimeout, requests.ConnectTimeout,
                    requests.ConnectionError):
                print(
                    f"Timeout when access {start_url} with proxy {proxy} try time {try_time}/5"
                )
                # A timeout usually means a dead proxy; rotate when possible.
                if proxy_queue is not None:
                    proxy = read_proxy_queue(proxy_queue, need_sleep=True)
                continue

            if not page.ok:
                print(page.status_code, page.text)
                print(f"status code is {page.status_code} time {try_time}")
                continue

            candidate = BeautifulSoup(page.text, "html.parser")
            if is_banned(candidate):
                print(
                    f"Accessing is banned! times {try_time}/5 when use {proxy} to access {page.url}"
                )
            else:
                soup = candidate
                break

        if soup is None:
            # BUG FIX: the original fell through with `soup` unbound when
            # every retry timed out (NameError). Save what we have and stop.
            _save_job_links(filename, params, all_url)
            print(
                f"give up after retries for {params['key']}{dqs[params['dq']]}")
            return all_url

        # The "next page" button disappears on the last result page.
        next_button = soup.select("li[title='下一页'] button svg")
        if len(next_button) == 0:
            _save_job_links(filename, params, all_url)
            print(
                f"get all job link finished for {params['key']}{dqs[params['dq']]}"
            )
            return all_url

        currentPage += 1
        print(
            f"******{params['key']}{dqs[params['dq']]} page{currentPage}*****")

        # One anchor per job posting in the result list.
        for entry in soup.select(
                "li div.job-list-item a[data-nick='job-detail-job-info']"):
            url = url_clip(entry["href"])
            all_url.add(url)

            if job_url_queue is not None:
                job_url_queue.put((params['key'], url))

        time.sleep(random.random() / 2)


def _save_job_links(filename, params, all_url):
    # Append a region header plus one URL per line; no-op when there is
    # no file or nothing was collected.
    if filename is not None and len(all_url) != 0:
        with open(filename, 'a', encoding="UTF-8") as f:
            f.write(dqs[params['dq']] + '\n')
            f.writelines(map(lambda x: x + '\n', all_url))
            f.write("\n")


#爬取职位详情页的信息
def get_job_info(url, filename=None, proxy=None, lock: Lock = None) -> bool:
    '''
    Scrape one job-detail page and append its text to *filename*.

    Retries up to 10 times, each attempt with a fresh random User-Agent.
    Returns False when the request times out or all attempts were banned
    or failed -- callers treat False as "rotate to a new proxy" -- else
    True. When *lock* is given, file appends are serialized across
    processes.
    '''
    data = url + '\n'  # accumulated plain-text record; first line is the URL

    for times in range(10):
        headers = {'User-Agent': ua.random}
        try:
            page = requests.get(url=url,
                                headers=headers,
                                proxies=proxy,
                                timeout=15)
        except (requests.ReadTimeout, requests.ConnectTimeout,
                requests.ConnectionError):
            # A timeout likely means a dead proxy: bail out immediately
            # (no retry) so the caller can swap proxies.
            print(f"Timeout when use {proxy} to access {url}")
            return False
        if not page.ok:
            print(page.status_code, page.text)
            continue
        soup = BeautifulSoup(page.text, "html.parser")

        if is_banned(soup):
            print(
                f"Accessing is banned! times {times+1}/10 when use {proxy} to access {url}"
            )
            print(proxy, headers)
        else:
            # Container with the job title and highlight properties
            job = soup.select(
                "section.job-apply-container div.job-apply-content")
            if len(job) == 0:
                # Page fetched fine but has an unexpected layout; report
                # True so the caller does not burn another proxy on it.
                print(f"cannot analysis {url}")
                return True
            job = job[0]
            print(f"access {url} successfully")
            break
    else:
        # All 10 attempts were banned or returned a non-OK status.
        return False
    name = job.select("div.name-box")[0]
    job_properties = job.select("div.job-properties")[0]
    # print(" ".join(name.text.split()))
    # print(" ".join(job_properties.text.split()))
    data += " ".join(name.text.split()) + '\n'
    data += " ".join(job_properties.text.split()) + '\n'

    # Job description sections (one <dl class="paragraph"> each)
    for info in soup.select("section.job-intro-container dl.paragraph"):
        # Section heading, e.g. "job description"
        title = tuple(map(lambda x: x.text, info.select("dt")))
        assert len(title) == 1, "len(title) is not 1"
        # Job tags
        tag = map(lambda x: x.text, info.select("div.tag-box ul li"))
        # Section body paragraphs
        content = map(lambda x: x.text, info.select("dd"))

        # print(title[0])
        # print(" ".join(tag))
        data += title[0] + '\n' + " ".join(tag) + '\n'
        for i in content:
            # Collapse doubled blank lines and strip carriage returns.
            data += i.replace("\n\n", '\n').replace("\r\n\r\n", "\n").replace(
                "\r", "") + '\n'
        data += '\n'
        # if title[0] == "职位介绍":
        #     end = ' '
        # elif title[0] == "其他信息":
        #     end = '\n'
        # else:
        #     end = '*'
        # for i in content:
        #     # print(i, end=end)
        #     data += i + end
    if filename is not None:
        if lock is not None:
            # Serialize appends when several spider processes share one file.
            with lock:
                with open(filename, 'a', encoding="UTF-8") as f:
                    f.write(data + '\n\n')
        else:
            with open(filename, 'a', encoding="UTF-8") as f:
                f.write(data + '\n\n')
    return True


def get_proxies_url(proxy_list=None,
                    proxy=None,
                    proxy_queue: Queue = None,
                    flag_queue: Queue = None):
    """Scrape free proxies from kuaidaili and validate each one.

    Parameters:
        proxy_list: when given, every validated proxy dict is appended.
        proxy: requests-style proxies mapping used to fetch the proxy-list
            pages themselves (None = direct connection).
        proxy_queue: when given together with flag_queue, validated
            proxies are also pushed onto this bounded queue.
        flag_queue: a non-empty flag queue means "shut down" - the
            function returns as soon as it notices it.

    Returns None; results are delivered via proxy_list / proxy_queue.
    """
    for page_index in range(15):
        proxies_web_url = "https://www.kuaidaili.com/free/inha/" + str(
            page_index + 1)
        for try_time in range(5):
            headers = {'User-Agent': ua.random}
            try:
                page = requests.get(url=proxies_web_url,
                                    headers=headers,
                                    proxies=proxy,
                                    timeout=7)
                break
            except (requests.ReadTimeout, requests.ConnectTimeout,
                    requests.ConnectionError):
                # BUG FIX: the original printed page.url here, but `page`
                # is unbound when the request itself failed; it also said
                # "/3" although 5 attempts are made.
                print(
                    f"Timeout {try_time}/5  when use {proxy} to access {proxies_web_url}"
                )
                print(headers)
        else:
            # Every attempt for this list page timed out; give up entirely.
            return
        soup = BeautifulSoup(page.text, "html.parser")
        for entry in soup.select("tbody tr"):
            ip = entry.select("td[data-title='IP']")[0].text
            port = entry.select("td[data-title='PORT']")[0].text
            proxy_type = entry.select("td[data-title='类型']")[0].text
            # BUG FIX: use a separate name instead of rebinding `proxy`,
            # which silently replaced the fetch proxy for later list pages.
            candidate = {proxy_type.lower(): ip + ':' + port}
            try:
                # A proxy counts as usable when it can reach baidu quickly.
                res = requests.get("http://www.baidu.com",
                                   proxies=candidate,
                                   timeout=7)
                if res.ok:
                    print(f"{candidate} is available")
                    # BUG FIX: the original appended twice (once
                    # unconditionally - crashing on proxy_list=None - and
                    # once under the guard). Append exactly once, guarded.
                    if proxy_list is not None:
                        proxy_list.append(candidate)
                    if proxy_queue is not None and flag_queue is not None:
                        # Block until there is room, but honor shutdown.
                        while True:
                            if proxy_queue.full():
                                if not flag_queue.empty():
                                    return
                                time.sleep(random.random() * 5)
                            else:
                                proxy_queue.put(candidate)
                                break
                else:
                    print(
                        f"reason isn't ok, proxies {candidate} isn't available"
                    )
            except (requests.ReadTimeout, requests.ConnectTimeout,
                    requests.ConnectionError):
                print(f"Timeout proxies {candidate} isn't available")
            if flag_queue is not None and not flag_queue.empty():
                return


class ProxiesPool(Process):
    """Background process that keeps proxy_queue stocked with validated proxies.

    It repeatedly calls get_proxies_url, rotating which proxy it uses to
    fetch the proxy-list pages themselves, until something appears on
    flag_queue (the shutdown signal).
    """

    def __init__(self, name, flag_queue: Queue, proxy_queue: Queue) -> None:
        super().__init__()
        self.name = name
        self.flag_queue = flag_queue    # non-empty => shut down
        self.proxy_queue = proxy_queue  # bounded queue consumed by spiders

    def run(self):
        self_proxy = None       # proxy used to fetch the proxy-list pages
        self_proxy_list = []    # reserve of known-good proxies to rotate through
        while True:
            if not self.flag_queue.empty():
                break  # shutdown requested
            proxy_list = []
            get_proxies_url(proxy_list=proxy_list,
                            proxy=self_proxy,
                            proxy_queue=self.proxy_queue,
                            flag_queue=self.flag_queue)
            if len(proxy_list) == 0:
                print("the proxy list is empty")
                # BUG FIX: self_proxy is drawn from proxy_list and is not
                # guaranteed to be in self_proxy_list (which is filled via
                # random.choices); a bare remove() raised ValueError and
                # killed this process. Guard the membership first.
                if self_proxy is not None and self_proxy in self_proxy_list:
                    self_proxy_list.remove(self_proxy)
                    print(f"remove the current self proxy {self_proxy}")
                if len(self_proxy_list) == 0:
                    print("get proxies with proxy None")
                    self_proxy = None
                else:
                    self_proxy = random.choice(self_proxy_list)
            else:
                self_proxy = random.choice(proxy_list)
                # Top the reserve up to ~20 entries (duplicates possible).
                rest = 20 - len(self_proxy_list)
                if rest > 0:
                    self_proxy_list.extend(random.choices(proxy_list, k=rest))
            time.sleep(random.random() * 5)

class JobListSpider(Process):
    """Process that walks every (keyword, region) search and feeds job URLs.

    For each keyword/region pair it calls get_all_job_link, which pushes
    discovered (key, url) pairs onto job_url_queue for the detail spiders
    and appends them to a per-keyword file.
    """

    def __init__(self, name, proxy_queue: Queue, job_url_queue: Queue) -> None:
        super().__init__()
        self.name = name
        self.proxy_queue = proxy_queue        # source of validated proxies
        self.job_url_queue = job_url_queue    # sink for (key, url) pairs

    def run(self):
        keys = ["人工智能", "设计", "运营", "管理", "市场", "数据标注"]
        start_url = "https://www.liepin.com/zhaopin"
        proxy = read_proxy_queue(self.proxy_queue)
        for key in keys:
            filename = "./output/url_" + key + ".txt"
            for dq in dqs:
                start_time = time.time()
                search_params = {'key': key, 'dq': dq}
                get_all_job_link(start_url=start_url,
                                 params=search_params,
                                 job_url_queue=self.job_url_queue,
                                 filename=filename,
                                 proxy_queue=self.proxy_queue,
                                 proxy=proxy)
                print(f"{key}-{dqs[dq]} used {time.time()-start_time} s")


class JobDetailSpider(Process):
    """Worker process that scrapes job-detail pages from job_url_queue.

    Shutdown contract: an item on flag_queue means "no more URLs are
    coming", not "stop now" - the worker keeps draining job_url_queue and
    exits only once it is empty.
    """

    def __init__(self, name, flag_queue: Queue, proxy_queue: Queue,
                 job_url_queue: Queue, file_lock: Lock) -> None:
        super().__init__()
        self.name = name
        self.flag_queue = flag_queue        # non-empty => producers finished
        self.proxy_queue = proxy_queue      # replacement proxies on failure
        self.job_url_queue = job_url_queue  # (search key, detail url) pairs
        self.file_lock = file_lock          # serializes output-file appends

    def run(self):
        finished = False
        proxy = read_proxy_queue(self.proxy_queue, need_sleep=True)
        # BUG FIX: the original looped `while not finished`, so once the
        # flag was set it exited after processing only ONE more item,
        # abandoning whatever remained in the queue. Loop forever and
        # break only when the flag is set AND the queue is drained.
        while True:
            if not self.flag_queue.empty():
                finished = True
            if self.job_url_queue.empty():
                if finished:
                    break
                time.sleep(random.random() * 3)
                continue
            key, job_url = self.job_url_queue.get()
            filename = "./output/detail_" + key + ".txt"
            # Retry the same URL with fresh proxies until it succeeds.
            while True:
                access = get_job_info(job_url,
                                      filename=filename,
                                      proxy=proxy,
                                      lock=self.file_lock)
                time.sleep(random.random() * 0.05)
                if access:
                    break
                proxy = read_proxy_queue(self.proxy_queue, need_sleep=True)


if __name__ == "__main__":
    start_time = time.time()
    file_lock = Lock()  # serializes appends to the shared detail files
    # Flag queues carry the shutdown signal: putting any item on one tells
    # the corresponding process group to finish up and exit.
    proxy_flag_queue = Queue()
    spider_flag_queue = Queue()
    proxy_queue = Queue(maxsize=10)  # bounded pool of validated proxies
    job_url_queue = Queue()          # (key, detail-url) pairs to scrape

    proxy_pool_process = ProxiesPool("proxy_pool_process",
                                     flag_queue=proxy_flag_queue,
                                     proxy_queue=proxy_queue)
    job_list_spider = JobListSpider("job_list_spider",
                                    proxy_queue=proxy_queue,
                                    job_url_queue=job_url_queue)
    # Five parallel detail scrapers share the URL queue and output lock.
    job_detail_spiders = [
        JobDetailSpider("job_detail_spider" + str(i + 1),
                        flag_queue=spider_flag_queue,
                        proxy_queue=proxy_queue,
                        job_url_queue=job_url_queue,
                        file_lock=file_lock) for i in range(5)
    ]

    proxy_pool_process.start()
    job_list_spider.start()
    for process in job_detail_spiders:
        process.start()

    # Once the list spider exits, no new URLs will appear: tell the detail
    # spiders to drain the queue and stop.
    job_list_spider.join()
    spider_flag_queue.put(True)
    print("job list spider finished")

    for process in job_detail_spiders:
        process.join()
    proxy_flag_queue.put(True)
    # BUG FIX: corrected the "detial" typo in this log message.
    print("job detail spider finished")

    proxy_pool_process.join()
    print("proxy pool process spider finish")

    print(f"all processes stop using {time.time()-start_time} s")
