# -*-coding:utf8-*-

import sys

sys.path.insert(0, '../')

from config import *
from dajie_config import *

# MongoDB database that holds everything scraped from dajie.com.
# `client` comes from the wildcard import of `config`.
dajie_db = client['dajie_db']

# Uncomment to wipe previously scraped data before a fresh run.
# dajie_db.drop_collection('dajie_posts')
# dajie_db.drop_collection('dajie_corps')

# Two collections: job postings and company profiles.
dajie_posts = dajie_db['dajie_posts']
dajie_corps = dajie_db['dajie_corps']


# Internship module
class DAJIE_INTERN():
    """Scraper for dajie.com internship postings.

    Walks every page of the public intern-search endpoint, builds one
    item per posting and upserts it into the ``dajie_posts`` collection.
    """

    def __init__(self):
        self.login()

    def login(self):
        # The intern endpoint needs no real authentication; this indirection
        # is kept for symmetry with DAJIE_POST's login()/begin() flow.
        self.begin()

    def begin(self):
        """Fetch page 1 to learn the total page count, then crawl every page."""
        try:
            r = requests.post('https://job.dajie.com/job/intern/internSearch?_CSRFToken=', data = {'ajax': 1, 'page': 1})
            resp = r.json()
        except Exception:
            # First request failed (network error or bad JSON): nothing to crawl.
            return

        totalPage = resp['data']['totalPages'] if 'data' in resp and 'totalPages' in resp['data'] else 0

        if totalPage == 0:
            return

        # BUG FIX: page 1 was previously fetched only for its page count and
        # its job list was silently discarded; parse it as well.
        try:
            self.parse_job_list(resp['data']['list'])
        except Exception:
            pass

        for i in range(2, totalPage + 1):
            try:
                r = requests.post('https://job.dajie.com/job/intern/internSearch?_CSRFToken=', data = {'ajax': 1, 'page': i})
                self.parse_job_list(r.json()['data']['list'])
            except Exception:
                # Best effort: skip pages that fail to download or decode.
                continue

    def parse_job_list(self, data):
        """Scrape the detail page of every job dict in *data*."""
        for job in data:
            self.scrapy_job_description(job)

            # Randomized politeness delay between detail-page requests.
            time.sleep(random.random())

    def scrapy_job_description(self, job):
        """Download one posting's detail page and store it in ``dajie_posts``.

        *job* is one entry of the search API's ``data.list`` array; its
        ``jobUrl`` field is a protocol-relative URL (``//job.dajie.com/...``).
        """
        headers = {
            'authority': 'job.dajie.com',
            'method': 'GET',
            'scheme': 'https',
            'path': job['jobUrl'].replace('//job.dajie.com', ''),
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
            'cache-control': 'no-cache',
            'pragma': 'no-cache',
            'referer': 'https://job.dajie.com/job/intern',
            'upgrade-insecure-requests': '1',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
        }

        jobUrl = 'https:' + job['jobUrl']

        try:
            r = requests.get(jobUrl, headers = headers)
        except Exception:
            # BUG FIX: an unreachable detail page used to crash the whole
            # crawl; skip just this posting instead.
            return

        soup = BeautifulSoup(r.text)

        # Each of these fields is optional on the page; fall back to an
        # empty value when the element is missing.
        try:
            edu = soup.find(class_ = 'edu').get_text().strip()
        except Exception:
            edu = ''

        try:
            count = soup.find(class_ = 'recruiting').get_text().strip()
        except Exception:
            count = ''

        try:
            welfare = soup.find(class_ = 'job-msg').find(class_ = 'job-msg-bottom').find('ul').get_text().strip().split()
        except Exception:
            welfare = []

        try:
            industry = soup.find(class_ = 'i-corp-base-info').find(class_ = 'info').find(text = re.compile('行业')).find_next_sibling().text
        except Exception:
            industry = ''

        item = {
            'url': jobUrl,
            'edu': edu,
            'exp': [],
            'name': job.get('jobName', ''),
            'date': job.get('createDate', ''),
            'lang': '',
            'place': '',
            'major': '',
            'count': count,
            'salary': job.get('salary'),
            'toSchool': True,
            'welfare': welfare,
            'funType': '',
            'company': job.get('corpName', ''),
            'location': job.get('citys', ''),
            'keywords': [],
            'industry': industry,
            'subIndustry': '',
            'searchKeyword': '',
            'platform': 'dajie',
            'description': '',
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # BUG FIX: the item was previously built and then thrown away.
        # Upsert it keyed on (company, name, location), mirroring DAJIE_POST.
        result = dajie_posts.replace_one({'company': item['company'], 'name': item['name'], 'location': item['location']}, item, True)

        pprint.pprint(item)

        if result.matched_count:
            print('-' * 40, 'update one job', '-' * 40)
        else:
            print('-' * 40, 'insert one job', '-' * 40)


class DAJIE_POST():
    """Scraper for dajie.com full-time job search results.

    Iterates every configured industry / sub-industry / keyword (from the
    ``keywords`` global), pages through the search API, and upserts job
    postings into ``dajie_posts`` and company profiles into ``dajie_corps``.
    """

    # Company-scale labels considered too small to keep.
    _SMALL_SCALES = ('1 - 49人', '50 - 99人', '100 - 499人', '500 - 999人')

    def __init__(self):
        # Per-run bookkeeping of (company+job+city) keys already scraped,
        # used to avoid duplicate detail-page requests.
        # NOTE(review): cd_count is never written or read in this class;
        # kept for interface stability.
        self.jd_count = {}
        self.cd_count = {}

        self.login()

    def login(self):
        # No real authentication is performed; begin() grabs session cookies
        # from the search landing page itself.
        self.begin()

    def begin(self):
        """Crawl every configured keyword, page by page."""
        for industry, v in keywords.items():
            for subIndustry, config in v.items():
                for keyword in config['relative']:
                    try:
                        r = requests.get(login_url, params = {'keyword': keyword, 'from': 'job', 'clicktype': 'blank'}, headers = login_headers)
                    except Exception:
                        print('网络请求错误!')
                        continue

                    # Cookies from the landing page are required by the
                    # ajax filter endpoint below.
                    self.cookies = r.cookies

                    curr_page = 1

                    while True:
                        post_params['page'] = str(curr_page)
                        post_params['keyword'] = keyword

                        post_headers['path'] = '/job/ajax/search/filter?' + parse.urlencode(post_params)
                        post_headers['referer'] = "https://so.dajie.com/job/search?" + parse.urlencode({"keyword": keyword, "from": "job", "clicktype": "blank"})

                        try:
                            r = requests.get(post_url, headers = post_headers, params = post_params, cookies = self.cookies)
                            self.cookies.update(r.cookies)
                        except Exception:
                            print('no response!')
                            break

                        try:
                            data = r.json()['data']['list']
                        except Exception:
                            data = []

                        if not data:
                            print('no data at all')

                        for job in data:
                            # Filter out companies that are too small (or
                            # whose scale is unknown).
                            if 'scaleName' not in job or job['scaleName'] in self._SMALL_SCALES:
                                continue

                            # Skip jobs already scraped during this run.
                            key = job.get('compName', '') + job.get('jobName', '') + job.get('pubCity', '')

                            if key in self.jd_count:
                                continue

                            self.scrapy_job_description(job, keyword, industry, subIndustry)
                            self.scrapy_comp_description(job)

                            time.sleep(random.random() * 1.5)

                        try:
                            if curr_page >= r.json()['data']['totalPage']:
                                break
                        except Exception:
                            break

                        curr_page += 1

                        time.sleep(random.random() * 3)

    def scrapy_job_description(self, job, keyword, industry, subIndustry):
        """Fetch one job's detail page and upsert it into ``dajie_posts``.

        *job* is one entry of the search API result list; *keyword*,
        *industry* and *subIndustry* record how the job was found.
        """
        if 'jobHref' not in job:
            return

        # '15k-20k'-style strings -> [15000, 20000]; empty on any failure
        # (e.g. job['salary'] missing/None).
        try:
            salary = [int(n) * 1000 for n in re.findall(r'\d+', job.get('salary'))]
        except Exception:
            salary = []

        # Experience requirement in years; '不限' means "no requirement".
        pubEx = job.get('pubEx', '')
        if pubEx == '不限' or pubEx == '':
            exp = []
        else:
            exp = [int(n) for n in re.findall(r'\d+', pubEx)]

        item = {
            'url': 'http:' + job.get('jobHref', ''),
            'edu': job.get('pubEdu', ''),
            'exp': exp,
            'name': job.get('jobName', ''),
            'date': job.get('time', ''),
            'lang': '',
            'place': '',
            'major': '',
            'count': '',
            'salary': salary,
            'salary2': job.get('salary'),
            'toSchool': False,
            'welfare': '',
            'funType': '',
            'company': job.get('compName', ''),
            'location': job.get('pubCity', ''),
            'keywords': [keyword],
            'industry': industry,
            'subIndustry': subIndustry,
            'searchKeyword': keyword,
            'platform': 'dajie',
            'description': '',
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        try:
            r = requests.get(item['url'], headers = detail_headers)
        except Exception:
            return

        try:
            soup = BeautifulSoup(r.text)
        except Exception:
            soup = None

        if not soup:
            return

        # BUG FIX: these lookups were unguarded — one page missing a section
        # raised AttributeError and crashed the entire crawl loop. Fall back
        # to the defaults already present in `item` instead.
        try:
            item['description'] = soup.find(id = 'jp_maskit').get_text()
        except Exception:
            pass

        try:
            item['welfare'] = soup.find(class_ = 'job-msg').find(class_ = 'job-msg-bottom').find('ul').get_text().strip().split()
        except Exception:
            pass

        try:
            # Look up the 'recruiting' text once instead of three times.
            recruiting = soup.find(class_ = 'recruiting').get_text()
            item['count'] = int(re.sub(r"\D", "", recruiting)) if re.match(r'\d+', recruiting) else ''
        except Exception:
            pass

        # Upsert keyed on (company, name, location).
        result = dajie_posts.replace_one({'company': job.get('compName', ''), 'name': job.get('jobName', ''), 'location': job.get('pubCity', '')}, item, True)

        pprint.pprint(item)

        # A match means we updated an existing posting rather than inserting.
        if result.matched_count:
            print('-' * 40, 'update one job', '-' * 40)
        else:
            print('-' * 40, 'insert one job', '-' * 40)

        key = job.get('compName', '') + job.get('jobName', '') + job.get('pubCity', '')

        self.jd_count[key] = True

    def scrapy_comp_description(self, job):
        """Fetch one company's intro page and insert it into ``dajie_corps``.

        Skips companies already stored or lacking a corp id.
        """
        compName, industryName, scaleName, pubCity, corpId = job.get('compName', ''), job.get('industryName', ''), job.get('scaleName', ''), job.get('pubCity', ''), job.get('corpId', '')

        if dajie_corps.find_one({'name': compName}):
            return

        if corpId == '':
            return

        url = 'https://www.dajie.com/corp/' + str(corpId) + '/index/intro'

        company_headers['path'] = '/corp/' + str(corpId) + '/index/intro'

        try:
            soup = BeautifulSoup(requests.get(url, headers = company_headers, cookies = self.cookies).text)
        except Exception:
            soup = None

        if not soup:
            return

        # The intro text lives in one of two layouts; try both.
        try:
            container = soup.find(class_ = 'cor-container')
            if container.find(class_ = 'li-wrap'):
                description = container.find(class_ = 'li-wrap').get_text().strip()
            elif container.find(class_ = 'cor-introduce'):
                description = container.find(class_ = 'cor-introduce').get_text().strip()
            else:
                description = ''
        except Exception:
            description = ''

        # Company ownership type ('性质' = "nature/type") from the info table.
        # (Renamed from `type`, which shadowed the builtin.)
        try:
            corp_type = soup.find(class_ = 'cor-table').find(text = re.compile('性质')).parent.parent.find_next_sibling().select('td')[2].text.strip()
        except Exception:
            corp_type = ''

        # Headcount: strip non-digits from e.g. '1000人以上'; 0 if unparsable.
        try:
            corp_size = int(re.sub(r"\D", "", scaleName))
        except Exception:
            corp_size = 0

        item = {
            'url': url,
            'type': corp_type,
            'size': corp_size,
            'name': compName,
            'platform': 'dajie',
            'location': pubCity,
            'industry': industryName,
            'description': description,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        dajie_corps.insert_one(item)

        pprint.pprint(item)
        print('-' * 40, 'insert one company', '-' * 40)


if __name__ == '__main__':
    # Run the full-time job crawler; the intern crawler is currently disabled.
    DAJIE_POST()
    # DAJIE_INTERN()
