import sys

# Make the parent directory importable so the shared config modules resolve.
sys.path.insert(0, '../')

from config import *
from zhilian_config import *

# MongoDB database handle; `client` presumably comes from `config` — TODO confirm.
zhilian_db = client['zhilian_db']

# Uncomment to wipe previously scraped data before a fresh crawl.
# zhilian_db.drop_collection('zhilian_posts')
# zhilian_db.drop_collection('zhilian_corps')

# Collections: scraped job postings and scraped company profiles.
zhilian_posts, zhilian_corps = zhilian_db['zhilian_posts'], zhilian_db['zhilian_corps']


class ZHILIAN_POST():
    """Crawler for zhaopin.com (zhilian) job postings and company profiles.

    Construction immediately logs in and runs the whole crawl
    (``__init__`` -> ``login`` -> ``begin``); results are written to the
    module-level ``zhilian_posts`` and ``zhilian_corps`` MongoDB collections.
    """

    # Compiled once and reused; extracts runs of digits from salary /
    # headcount / company-size text.
    _NUM_RE = re.compile(r'\d+')

    def __init__(self, industry):
        """Prepare search targets for *industry* and start the crawl."""
        # Five batches of target cities; each batch is searched as a single
        # '+'-joined job-area query parameter.
        self.jobareas = [
            ['北京', '上海', '广州', '深圳', '杭州'],
            ['武汉', '西安', '南京', '成都', '重庆'],
            ['东莞', '大连', '沈阳', '苏州', '昆明'],
            ['长沙', '合肥', '宁波', '郑州', '天津'],
            # BUG FIX: was '哈尔冰' (misspelling) — Harbin searches returned nothing.
            ['青岛', '济南', '哈尔滨', '长春', '福州']
        ]

        # Only crawl the keyword configuration for the requested industry.
        self.keywords = {industry: keywords[industry]}

        # De-duplication bookkeeping: jobs / companies already scraped in
        # this run, keyed by "company+name+location" and company name.
        self.jd_count = {}
        self.cd_count = {}

        self.login()

    def login(self):
        """Log in to zhaopin, keep the session cookies, then start crawling."""
        # SECURITY NOTE(review): credentials are hard-coded in source; they
        # should be moved to configuration or environment variables.
        data = {
            'int_count': '999',
            'errUrl': 'https://passport.zhaopin.com/account/login',
            'RememberMe': 'true',
            'requestFrom': 'portal',
            'loginname': '13735863577',
            'Password': '1991303017aaa'
        }

        r = requests.post(login_url, data = data, headers = login_headers)

        # Session cookies reused by every subsequent search / detail request.
        self.cookies = r.cookies

        self.begin()

    def begin(self):
        """Page through every search-result list: industry x sub-industry x
        keyword x city batch x company-type filter."""
        for industry, v in self.keywords.items():
            for subIndustry, config in v.items():
                for keyword in config['relative']:
                    for jobarea in self.jobareas:
                        # 'ct' is zhilian's company-type filter code.
                        for ct in ['9', '1']:
                            page = 1
                            while True:
                                params = {
                                    "kw": keyword,
                                    "jl": '+'.join(jobarea),
                                    "sm": "0",
                                    "p": page,
                                    'ct': ct,
                                    'isfilter': '1',
                                    'in': ';'.join(config['industrytype4zhilian']),
                                    'bj': ''.join(config['funtype4zhilian'])
                                }

                                pprint.pprint(params)

                                r = requests.get(post_url, headers = post_headers, params = params, cookies = self.cookies, verify = False)

                                soup = BeautifulSoup(r.text)

                                # The first .newlist row is the table header,
                                # so <= 1 rows means an empty result page.
                                if len(soup.select('.newlist_list_content .newlist')) <= 1:
                                    break

                                self.parse_job_list(soup, industry, subIndustry, keyword)

                                # Stop when there is no enabled "next page" link.
                                if not soup.find(class_ = 'next-page') or soup.find(class_ = 'next-page').get('href') is None:
                                    break

                                page += 1

    def record(self):
        """Placeholder for a dedicated persistence step (currently unused;
        storage happens inline in the scrape methods)."""
        pass

    def scrapy_comp_description(self, url, name):
        """Scrape one company profile page and insert it into zhilian_corps.

        Skips companies already stored and URLs not on company.zhaopin.com.
        Every field is parsed best-effort: a failed lookup yields an empty
        value rather than aborting the record.
        """
        if zhilian_corps.find_one({'name': name}):
            return

        if url.find('company.zhaopin.com') == -1:
            return

        soup = None

        try:
            r = requests.post(url, headers = company_headers, timeout = 5)
            soup = BeautifulSoup(r.text)
        except Exception:
            print('企业信息爬取有误，请检查！')
            return

        # An empty response parses to a falsy soup — nothing to extract.
        if not soup:
            return

        # 'comp_type' (公司性质), not 'type', to avoid shadowing the builtin.
        try:
            comp_type = soup.find(text = re.compile('公司性质：')).parent.parent.find_next_sibling().get_text().strip()
        except Exception:
            comp_type = ''

        # Company size (公司规模) as a list of the digit runs found, e.g.
        # "100-499人" -> [100, 499].
        try:
            size = [int(num) for num in self._NUM_RE.findall(soup.find(text = re.compile('公司规模：')).parent.parent.find_next_sibling().get_text().strip())]
        except Exception:
            size = []

        try:
            industry = soup.find(text = re.compile('公司行业：')).parent.parent.find_next_sibling().get_text().strip()
        except Exception:
            industry = ''

        try:
            description = soup.find(class_ = 'company-content').get_text().strip()
        except Exception:
            description = ''

        # Location is not parsed from the profile page yet.
        location = ''

        item = {
            'url': url,
            'name': name,
            'type': comp_type,
            'size': size,
            'industry': industry,
            'location': location,
            'platform': 'zhilian',
            'description': description,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        result = zhilian_corps.insert_one(item)

        if result:
            self.cd_count[name] = True
            pprint.pprint(item)
            print('-' * 40, 'insert one company', '-' * 40)

    def scrapy_job_description(self, url, keyword, industry, subIndustry):
        """Scrape one job-detail page and upsert it into zhilian_posts.

        Only jobs.zhaopin.com URLs are handled. Each field is parsed
        best-effort: a failed lookup falls back to an empty value.
        """
        if url.find('jobs.zhaopin.com') == -1:
            return

        soup = None

        try:
            r = requests.get(url, headers = job_headers, cookies = self.cookies, verify = False)
            r.encoding = 'utf-8'
            soup = BeautifulSoup(r.text)
        except Exception:
            return

        if soup is None:
            return

        try:
            name = soup.find(class_ = 'top-fixed-box').find('h1').text
        except Exception:
            name = ''

        try:
            company = soup.find(class_ = 'top-fixed-box').find('h2').text
        except Exception:
            company = ''

        try:
            location = soup.find(class_ = 'terminalpage').find(text = re.compile('工作地点：')).parent.find_next_sibling().text
        except Exception:
            location = ''

        # Monthly salary (职位月薪) as the digit runs found, e.g.
        # "8000-12000元" -> [8000, 12000].
        try:
            salary = [int(num) for num in self._NUM_RE.findall(soup.find(class_ = 'terminalpage').find(text = re.compile('职位月薪：')).parent.find_next_sibling().text)]
        except Exception:
            salary = []

        try:
            description = soup.find(class_ = 'tab-inner-cont').get_text().strip()

            # Trim the trailing work-address section off the description.
            if description.find('工作地址：') != -1:
                description = description[:description.find('工作地址：')].strip()
        except Exception:
            description = ''

        # Required experience (工作经验) in years, as the digit runs found.
        try:
            exp = [int(num) for num in self._NUM_RE.findall(soup.find(class_ = 'terminalpage').find(text = re.compile('工作经验：')).parent.find_next_sibling().text)]
        except Exception:
            exp = []

        try:
            edu = soup.find(class_ = 'terminalpage').find(text = re.compile('最低学历：')).parent.find_next_sibling().text

            # "不限" (no requirement) is normalized to empty.
            if edu == '不限':
                edu = ''
        except Exception:
            edu = ''

        # Headcount (招聘人数); note the fallback is '' rather than 0,
        # preserved for compatibility with existing records.
        try:
            count = int(re.sub(r"\D", "", soup.find(class_ = 'terminalpage').find(text = re.compile('招聘人数：')).parent.find_next_sibling().text))
        except Exception:
            count = ''

        try:
            date = soup.find(class_ = 'terminalpage').find(text = re.compile('发布日期：')).parent.find_next_sibling().text
        except Exception:
            date = ''

        # Not parsed from the page yet; kept so records share one schema.
        lang = ''
        major = ''
        place = ''

        try:
            welfare = [span.text for span in soup.find(class_ = 'welfare-tab-box').select('span')]
        except Exception:
            welfare = []

        try:
            funType = soup.find(text = re.compile('职位类别：')).parent.find_next_sibling().text
        except Exception:
            funType = ''

        item = {
            "url": url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'toSchool': False,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'industry': industry,
            'keywords': [keyword],
            'platform': 'zhilian',
            'searchKeyword': keyword,
            'description': description,
            'subIndustry': subIndustry,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        pprint.pprint(item)

        # upsert=True: replace the post when it already exists, insert otherwise.
        result = zhilian_posts.replace_one({'company': company, 'name': name, 'location': location}, item, True)

        # A matched document means this was an update, not a fresh insert.
        if result.matched_count:
            print('-' * 40, 'update one job', '-' * 40)
        else:
            print('-' * 40, 'insert one job', '-' * 40)

        self.jd_count[company + name + location] = True

    def parse_job_list(self, soup, industry, subIndustry, keyword):
        """Walk one search-result page (skipping the header row) and scrape
        each unseen job — and, once per company, its profile page."""
        for e in soup.select('.newlist_list_content .newlist')[1:]:
            job_url = e.find(class_ = 'zwmc').find('a').get('href')
            comp_url = e.find(class_ = 'gsmc').find('a').get('href')

            location = e.find(class_ = 'gzdd').text
            name = e.find(class_ = 'zwmc').find('a').text
            company = e.find(class_ = 'gsmc').find('a').text

            if (company + name + location) not in self.jd_count:
                self.scrapy_job_description(job_url, keyword, industry, subIndustry)

                if company not in self.cd_count:
                    self.scrapy_comp_description(comp_url, company)

            # Randomized politeness delay between detail requests.
            time.sleep(random.random() * 1.8)


# if __name__ == '__main__':
#     zhilian_post = ZHILIAN_POST()

def scrapy(industry):
    """Worker entry point: crawl a single industry end-to-end.

    Instantiating ZHILIAN_POST performs the login and the entire crawl as
    a constructor side effect; the instance itself is discarded.
    """
    ZHILIAN_POST(industry)


if __name__ == '__main__':
    # Industries to crawl; each entry is handed to its own worker process.
    info = [
        '咨询行业'
    ]

    p = Pool()

    # Iterate the values directly instead of `range(len(info))` indexing.
    # NOTE(review): apply_async discards worker exceptions unless the
    # AsyncResult is kept and .get() is called — failures here are silent.
    for industry in info:
        p.apply_async(scrapy, args = (industry,))

    p.close()
    p.join()
