import sys

sys.path.insert(0,'../')

from config import *
from lagou_config import *
import math

# MongoDB handles: `client` is provided by the star import from config.
lagou_db = client['lagou_db']
lagou_posts = lagou_db['lagou_posts']
lagou_corps = lagou_db['lagou_corps']

# Lagou crawler. Job/company detail pages are not always accessible, in
# which case the location and description fields end up empty.
class LAGOU_POST():
    def __init__(self):
        # Target cities for the search queries.
        self.city = [
            '北京', '上海', '深圳', '广州', '杭州', '成都', '南京', '武汉',
            '西安', '厦门', '长沙', '苏州', '天津', '重庆', '郑州', '青岛',
            '合肥', '福州', '济南', '大连', '珠海', '无锡', '佛山', '东莞',
            '宁波', '常州', '沈阳', '昆明', '南昌', '南宁', '海口', '中山',
            '惠州', '贵阳', '长春', '太原', '嘉兴', '泰安', '昆山', '烟台',
            '兰州', '泉州', '石家庄', '哈尔滨'
        ]
        # `keywords`, `ua_list` and all *_headers/login_* names come from the
        # star imports of config / lagou_config.
        self.keywords = keywords
        self.uaList = ua_list

        # De-duplication caches so jobs and companies are scraped only once.
        self.jd_count = {}
        self.cd_count = {}

        self.login_url = login_url
        self.login_data = login_data
        self.login_headers = login_headers

        self.post_headers = post_headers
        self.post_headers2 = post_headers2

        self.company_headers = company_headers
        self.ip_scrapy_headers = ip_scrapy_headers

        self.ip_list = self.scrap_ip_list()
        self.login()

        # NOTE(review): Collection.count() is deprecated (removed in
        # pymongo 4); count_documents({}) is the modern equivalent — confirm
        # the installed pymongo version before switching.
        print(lagou_posts.count())
        print(lagou_corps.count())

    def login(self):
        # Login is currently disabled (the session-based flow is kept in
        # version control history); the crawler relies on per-request
        # cookies fetched inside begin() instead.
        self.begin()

    def begin(self):
        """Walk industry -> sub-industry -> keyword -> city, page through the
        position-search API (max 30 pages) and dispatch the detail scrapers.
        """
        for industry, v in self.keywords.items():
            for subIndustry, config in v.items():
                for keyword in config['relative']:
                    for city in self.city:
                        page = 1
                        while page <= 30:
                            # Rotate the spoofed source IP and User-Agent.
                            if self.ip_list:
                                self.post_headers['X-Forwarded-For'] = random.choice(self.ip_list)
                            ua = random.choice(self.uaList)
                            self.company_headers['User-Agent'] = ua
                            self.post_headers['User-Agent'] = ua
                            self.post_headers2['User-Agent'] = ua

                            # Hit the landing page to obtain fresh cookies.
                            try:
                                r = requests.get('https://www.boss.com', headers = self.post_headers2, timeout = 2)
                            except requests.RequestException:
                                print('connect fail')
                                break
                            self.cookies = r.cookies

                            params = {
                                'hy': config['industrytype4lagou'],
                                'city': city,
                                'isSchoolJob': '0',
                                'default': 'default',
                                "needAddtionalResult": 'false'
                            }

                            self.post_headers['Referer'] = 'https://www.boss.com/jobs/list_' + parse.quote(
                                keyword) + '?city=' + parse.quote(
                                city) + '&cl=false&fromSearch=true&labelWords=&suginput='

                            # Probe request: only used to learn totalCount.
                            try:
                                r = requests.post(
                                    'https://www.boss.com/jobs/positionAjax.json?needAddtionalResult=false&isSchoolJob=0',
                                    headers=self.post_headers, cookies=self.cookies,
                                    data={"first": 'true', 'pn': '1', 'kd': keyword}, params=params, timeout = 2)
                            except requests.RequestException:
                                print('connect fail')
                                break

                            try:
                                resp = r.json()
                            except ValueError:
                                print('wrong json type')
                                break

                            if resp['success']:
                                totalCount = resp['content']['positionResult']['totalCount']
                                if totalCount == 0:
                                    break
                                # The API serves 15 results per page.
                                if page <= math.ceil(totalCount / 15):
                                    print('*' * 40, keyword, city, '第' + str(page) + '页', '*' * 40)
                                    data = {
                                        'first': 'true',
                                        'pn': str(page),
                                        'kd': keyword
                                    }
                                    if page > 1:
                                        data['first'] = 'false'
                                    try:
                                        # BUG FIX: the original sent a hard-coded
                                        # {'first': 'true', ...} payload and never
                                        # used the `data` dict built above, so
                                        # 'first' was wrong for every page > 1.
                                        r = requests.post(
                                            'https://www.boss.com/jobs/positionAjax.json?needAddtionalResult=false&isSchoolJob=0',
                                            headers=self.post_headers, cookies=self.cookies,
                                            data=data, params=params, timeout = 2)
                                    except requests.RequestException:
                                        print('connect fail')
                                        break
                                    try:
                                        resp = r.json()
                                    except ValueError:
                                        print('wrong json type')
                                        break
                                    # BUG FIX: validate the page response just
                                    # parsed (the original tested the stale probe
                                    # response, which was already known to be
                                    # successful — a dead check).
                                    if not resp['success']:
                                        break
                                    try:
                                        for e in resp['content']['positionResult']['result']:
                                            key = e['positionName'] + e['city'] + e['companyFullName']
                                            if key not in self.jd_count:
                                                self.scrapy_job_description(e['positionId'], e, keyword, industry, subIndustry)
                                                self.jd_count[key] = 'exist'
                                                if e['companyFullName'] not in self.cd_count:
                                                    self.scrapy_company_info(e['companyId'], industry, e)
                                                    self.cd_count[e['companyFullName']] = 'exist'
                                            else:
                                                print('-' * 40, 'already scanned', '-' * 40)
                                    except (KeyError, TypeError):
                                        # Malformed page payload: skip it.
                                        print('-' * 40, 'pass page', '-' * 40)
                                else:
                                    break
                            page += 1


    def scrapy_company_info(self, companyId, industry, json):
        """Scrape one company detail page and insert a record into lagou_corps.

        `json` is the search-result dict for one posting (the parameter name
        is kept for backward compatibility even though it shadows the stdlib
        module, which this method does not use).
        """
        url = 'https://www.boss.com/gongsi/' + str(companyId) + '.html'
        try:
            r = requests.get(url, headers = self.company_headers, timeout = 2)
        except requests.RequestException:
            print('connect fail')
            return
        # Explicit parser avoids the "no parser specified" warning and keeps
        # parsing independent of which optional parsers are installed.
        soup = BeautifulSoup(r.text, 'html.parser')

        # Every field is best-effort: missing keys/nodes degrade to ''/[].
        try:
            name = json['companyFullName']
        except (KeyError, TypeError):
            name = ''

        try:
            corp_type = json['financeStage']
        except (KeyError, TypeError):
            corp_type = ''

        try:
            # e.g. '15-50人' -> [15, 50]; non-range values fall through to [].
            size = [int(i) for i in json['companySize'][:-1].split('-')]
        except (KeyError, TypeError, ValueError):
            size = []

        try:
            location = soup.find(class_ = 'mlist_li_desc').get_text().strip()
        except AttributeError:
            location = ''

        try:
            description = soup.find(class_ = 'company_content').get_text().strip()
        except AttributeError:
            description = ''

        item = {
            'url': url,
            'name': name,
            'type': corp_type,
            'size': size,
            'industry': industry,
            'location': location,
            'platform': 'boss',
            'description': description,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        result = lagou_corps.insert_one(item)
        if result:
            print('-' * 40, 'insert one company', '-' * 40)

    def scrapy_job_description(self, positionId, json, keyword, industry, subIndustry):
        """Scrape one job detail page and upsert a record into lagou_posts.

        `json` is the search-result dict for the posting (name kept for
        backward compatibility; it shadows the stdlib module, unused here).
        """
        url = 'https://www.boss.com/jobs/' + str(positionId) + '.html'
        try:
            r = requests.get(url, headers=self.company_headers, timeout = 2)
        except requests.RequestException:
            print('connect fail')
            return
        soup = BeautifulSoup(r.text, 'html.parser')

        # Best-effort field extraction: anything missing degrades to ''/[].
        try:
            edu = json['education']
        except (KeyError, TypeError):
            edu = ''

        try:
            # workYear is e.g. '不限', '3-5年' or '1年以下' -> [] / [3, 5] / [1].
            exp = json['workYear']
            if exp == '不限':
                exp = []
            elif '-' in exp:
                exp = [int(i) for i in exp[:-1].split('-')]
            else:
                exp = [int(i) for i in re.findall(r"\d", exp)]
        except (KeyError, TypeError, ValueError):
            exp = []

        try:
            name = json['positionName']
        except (KeyError, TypeError):
            name = ''

        try:
            company = json['companyFullName']
        except (KeyError, TypeError):
            company = ''

        try:
            location = json['city']
        except (KeyError, TypeError):
            location = ''

        try:
            # salary is e.g. '10k-20k' -> [10000, 20000].
            salary = [int(i[:-1]) * 1000 for i in json['salary'].split('-')]
        except (KeyError, TypeError, ValueError):
            salary = []

        try:
            description = soup.find(class_ = 'job_bt').get_text().strip()
        except AttributeError:
            description = ''

        try:
            date = json['createTime']
        except (KeyError, TypeError):
            date = ''

        try:
            welfare = json['companyLabelList']
        except (KeyError, TypeError):
            welfare = []

        try:
            place = soup.find(class_='work_addr').get_text().strip()
            place = "".join(place.split())
            place = place.replace('查看地图', '')
        except AttributeError:
            place = ''

        # Fields the list API does not provide; kept for schema parity with
        # the other platforms' scrapers.
        count = ''
        lang = ''
        major = ''
        funType = ''

        item = {
            "url": url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'toSchool': False,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'industry': industry,
            'keywords': [keyword],
            'platform': 'boss',
            'searchKeyword': keyword,
            'description': description,
            'subIndustry': subIndustry,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        try:
            # upsert=True: replace the record if this job already exists,
            # otherwise insert a new one.
            result = lagou_posts.replace_one({'company': company, 'name': name, 'location': location}, item, True)

            # A matched document means we updated an existing job.
            if result.matched_count:
                print('-' * 40, 'update one job', '-' * 40)
            else:
                print('-' * 40, 'insert one job', '-' * 40)

        except Exception:
            print('-' * 40, 'insert one fail', '-' * 40)

    def scrap_ip_list(self):
        """Harvest proxy IPs from xicidaili (pages 1-4); used for the
        X-Forwarded-For header rotation in begin().

        Returns a (possibly empty) list of dotted-quad IP strings.
        """
        print('get ip......')
        url = 'http://www.xicidaili.com/nn/'
        ip_pattern = re.compile(r'\d+\.\d+\.\d+\.\d+')

        ips_list = []

        for i in range(1, 5):
            try:
                r = requests.get(url + str(i), headers = self.ip_scrapy_headers, timeout = 2)
            except requests.RequestException:
                # Best effort: one failed page must not abort startup
                # (the original crashed __init__ here on any network error).
                continue
            ips_list += ip_pattern.findall(r.text)
            time.sleep(random.uniform(1, 2))
        return ips_list


# Run the crawler only when executed as a script, not as an import
# side effect.
if __name__ == '__main__':
    LAGOU_POST()