import sys

sys.path.insert(0,'../')

from config import *
from yingcai_config import *

# MongoDB database handle; `client` is provided by `config` via the star import.
yingcai_db = client['yingcai_db']


# Collections for scraped job posts and scraped company profiles, respectively.
yingcai_posts, yingcai_corps = yingcai_db['yingcai_posts'], yingcai_db['yingcai_corps']

# ChinaHR (中华英才网, chinahr.com) scraper
class YINGCAI_POST():
    """Scraper for job posts and company profiles on chinahr.com.

    The entire crawl is driven from ``__init__``: it walks every configured
    keyword across every region, stores results in the ``yingcai_posts`` and
    ``yingcai_corps`` MongoDB collections, and de-duplicates within a single
    run via the in-memory ``jd_count`` / ``cd_count`` dictionaries.
    """

    def __init__(self):
        # ChinaHR "city" query values; each entry is a "provinceId,cityId"
        # pair accepted by the /sou/ search endpoint.
        self.jobareas = [
            '34,398', '36,400', '37,401', '25,292', '27,312',
            '25,291', '35,399', '16,173', '22,247', '23,264',
            '17,182', '11,111', '16,169', '13,133', '21,230',
            '24,277', '30,358', '18,193', '21,231', '25,307',
            '15,156', '13,134', '14,147', '25,308', '25,296',
            '38,402', '16,170', '28,333', '17,183', '19,210',
            '12,122', '11,116', '20,219', '19,211', '16,172',
            '25,293', '21,235', '29,342', '26,309', '11,120',
            '25,301', '11,112', '21,236', '42,440', '39,416',
            '41,435', '16,171', '22,249', '31,368', '32,382',
        ]

        # Keyword configuration comes from yingcai_config via the star import.
        self.keywords = keywords

        # Per-run job/company bookkeeping, mainly to avoid scraping the same
        # detail page twice within one crawl.
        self.jd_count = {}
        self.cd_count = {}
        self.begin()
        # NOTE(review): Collection.count() is deprecated in newer pymongo
        # (count_documents({}) is the replacement) -- kept as-is to avoid
        # assuming the installed driver version.
        print(yingcai_posts.count())
        print(yingcai_corps.count())

    # Crawl entry point
    def begin(self):
        """Walk industry -> sub-industry -> keyword -> region, paging through
        the search results and dispatching detail-page scrapes."""
        for industry, v in self.keywords.items():
            for subIndustry, config in v.items():
                for keyword in config['relative']:
                    for jobarea in self.jobareas:

                        # First request only probes page 1 to discover the
                        # total number of result pages.
                        testParams = {
                            "orderField": 'relate',
                            "keyword": keyword,
                            "city": jobarea,
                            'industrys': config['industrytype4yingcai'],
                            'page': 1,
                        }
                        try:
                            r = requests.get("http://www.chinahr.com/sou/", headers=post_headers, params=testParams, verify=False)
                        except Exception:
                            print('requests fail......wait a while')
                            # BUG FIX: random.random() takes no arguments --
                            # the original random.random(3) raised TypeError.
                            # random.uniform(0, 3) keeps the intended
                            # "sleep up to 3 seconds" back-off.
                            time.sleep(random.uniform(0, 3))
                            break
                        soup = BeautifulSoup(r.text)
                        # An 'emptyList' element means this keyword/region
                        # combination has no job posts at all.
                        if not soup.find(class_='emptyList'):
                            totalPage = int(soup.find(class_='quickPage').get_text().split('/')[1])
                            page = 1
                            while page <= totalPage:

                                params = {
                                    "orderField": 'relate',
                                    "keyword": keyword,
                                    "city": jobarea,
                                    'industrys': config['industrytype4yingcai'],
                                    'page': page,
                                }
                                pprint.pprint(params)
                                try:
                                    r = requests.get("http://www.chinahr.com/sou/", headers=post_headers, params=params,
                                                     verify=False)
                                except Exception:
                                    print('requests fail......wait a while')
                                    # BUG FIX: see above -- random.random(3)
                                    # was a TypeError.
                                    time.sleep(random.uniform(0, 3))
                                    break
                                soup = BeautifulSoup(r.text)

                                # Each list-entry field is best effort: a
                                # missing element simply yields ''.
                                for e in soup.select('.resultList .jobList'):
                                    try:
                                        job_url = e.find(class_='l1').find(class_='e1').find('a').get('href')
                                    except Exception:
                                        job_url = ''
                                    try:
                                        comp_url = e.find(class_='l1').find(class_='e3').find('a').get('href')
                                    except Exception:
                                        comp_url = ''
                                    try:
                                        # Text looks like "[city] ..." -- take the bracketed city.
                                        location = e.find(class_='l2').find(class_='e1').get_text().split(']')[0].strip()[1:]
                                    except Exception:
                                        location = ''
                                    try:
                                        jobName = e.find(class_='l1').find(class_='e1').get_text().strip()
                                    except Exception:
                                        jobName = ''
                                    try:
                                        companyName = e.find(class_='l1').find(class_='e3').get_text().strip()
                                    except Exception:
                                        companyName = ''
                                    try:
                                        # The e3 cell reads "industry | type | size".
                                        companyIndustry = e.find(class_='l2').find(class_='e3').get_text().split('|')[0].strip()
                                        companyType = e.find(class_='l2').find(class_='e3').get_text().split('|')[1].strip()
                                        companySize = e.find(class_='l2').find(class_='e3').get_text().split('|')[2].strip()
                                    except Exception:
                                        companyIndustry = companyType = companySize = ''
                                    # De-duplicate on company+job+location within this run.
                                    if not (companyName + jobName + location) in self.jd_count:
                                        self.scrapy_job_description(job_url, keyword, industry, subIndustry)
                                        self.jd_count[companyName + jobName + location] = 'exist'
                                        if not companyName in self.cd_count:
                                            self.scrapy_comp_description(comp_url, companyIndustry, companyType, companySize)
                                            self.cd_count[companyName] = 'exist'
                                    else:
                                        print('-' * 40, 'already scanned', '-' * 40)
                                page = page + 1

    def scrapy_comp_description(self, url, industry , type , size):
        """Fetch a company profile page and insert it into yingcai_corps.

        Skips URLs that are not chinahr company pages and companies already
        present in the collection.  ``industry``/``type``/``size`` are the
        values scraped from the search-result list.
        """
        if url.find('www.chinahr.com/company') == -1:
            return

        soup = None

        try:
            r = requests.post(url, headers = company_headers, timeout = 5)
            soup = BeautifulSoup(r.text)
        except Exception:
            # "Company scrape failed, please check!"
            print('企业信息爬取有误，请检查！')
            return

        if not soup:
            return

        try:
            name = soup.find(class_='infor-company').find('h1').text
        except Exception:
            name = ''

        # Already stored from a previous run -- nothing to do.
        if yingcai_corps.find_one({'name': name}):
            return

        try:
            description = soup.find(class_='intro-company').find(class_='article').get_text().strip()
        except Exception:
            description = ''

        try:
            location = soup.find(class_='icon_hf add').find_parent().get_text()
            # Strip the "公司地址：" ("company address:") label prefix.
            location = location.replace('公司地址：','',1)
        except Exception:
            location = ''

        item = {
            'url': url,
            'name': name,
            'type': type,
            'size': size,
            'industry': industry,
            'location': location,
            'platform': 'yingcai',
            'description': description,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # FIX: the original bound the insert result to `id`, shadowing the
        # builtin of the same name.
        result = yingcai_corps.insert_one(item)

        if result:
            # pprint.pprint(item)
            print('-' * 40, 'insert one company', '-' * 40)

    def scrapy_job_description(self, url, keyword, industry, subIndustry):
        """Fetch a job detail page and upsert it into yingcai_posts."""
        if url.find('www.chinahr.com/job') == -1:
            return

        soup = None

        try:
            r = requests.get(url, headers = job_headers, verify = False)
            r.encoding = 'utf-8'
            soup = BeautifulSoup(r.text)
        except Exception:
            return

        if soup is None:
            return

        try:
            name = soup.find(class_ = 'job_name').get_text()
        except Exception:
            name = ''

        try:
            company = soup.find(class_ = 'job-company jrpadding').find('h4').text
        except Exception:
            company = ''

        try:
            location = soup.find(class_ = 'job_loc').get_text()
        except Exception:
            location = ''

        try:
            # e.g. "8-10" -> [8, 10]
            salary = soup.find(class_='job_price').get_text().split('-')
            salary = [int(i) for i in salary]
        except Exception:
            salary = []

        try:
            description = soup.find(class_ = 'job_intro_info').get_text().strip()
        except Exception:
            description = ''

        try:
            # Pull the digits out of the experience requirement,
            # e.g. "3-5年" -> [3, 5].
            exp = soup.find(class_='job_exp').get_text().strip()
            exp = re.findall(r"\d",exp)
            exp = [int(i) for i in exp]
        except Exception:
            exp = []

        try:
            edu = soup.find(class_ = 'job_require').get_text().split("|")[3].strip()

            # '其他' means "other" -- treat as unspecified.
            if edu == '其他':
                edu = ''
        except Exception:
            edu = ''

        try:
            date = soup.find(class_ = 'updatetime').get_text()
            # Strip the "更新" ("updated") suffix.
            date = date.replace('更新','')
            # '今天' / '昨天' mean today / yesterday.
            if date == '今天':
                date = datetime.datetime.now().strftime('%Y-%m-%d')
            if date == '昨天':
                # NOTE(review): this branch stores a datetime.date object
                # while every other branch stores a string -- kept as-is.
                date = datetime.date.today() - datetime.timedelta(days=1)
        except Exception:
            date = ''

        try:
            welfare = list(map(lambda i: i.text, soup.find(class_ = 'job_fit_tags').select('li')))
        except Exception:
            welfare = []

        # Fields this detail page does not expose; kept empty for schema
        # parity with the other platforms.  (The original wrapped each of
        # these constant assignments in a pointless try/except.)
        count = ''
        lang = ''
        major = ''
        funType = ''
        place = ''

        item = {
            "url": url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'toSchool': False,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'industry': industry,
            'keywords': [keyword],
            'platform': 'yingcai',
            'searchKeyword': keyword,
            'description': description,
            'subIndustry': subIndustry,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # pprint.pprint(item)

        try:
            # The trailing True makes replace_one an upsert: replace when the
            # post already exists, insert otherwise.
            result = yingcai_posts.replace_one({'company': company, 'name': name, 'location': location}, item, True)

            # A matched document means this was an update of an existing post.
            if result.matched_count:
                print('-' * 40, 'update one job', '-' * 40)
            else:
                print('-' * 40, 'insert one job', '-' * 40)

        except Exception:
            print('-' * 40, 'insert one fail', '-' * 40)


# Run the crawl at import time (the scrape happens inside __init__).
# BUG FIX: the original bound the instance to `yingcai_posts`, clobbering the
# MongoDB collection of the same name defined above; a stray semicolon is
# also dropped.
scraper = YINGCAI_POST()