import sys

sys.path.insert(0, '../')

from config import *
from qiancheng_config import *

# MongoDB database holding all qianchengwuyou (51job) scrape results.
# NOTE(review): `client` is expected to come from the star import of
# `config` above — confirm there.
qiancheng_db = client['qiancheng_db']

# Uncomment to wipe previously scraped data before a fresh run.
# qiancheng_db.drop_collection('qiancheng_posts')
# qiancheng_db.drop_collection('qiancheng_corps')

# Collections for job postings and company profiles respectively.
qiancheng_posts, qiancheng_corps = qiancheng_db['qiancheng_posts'], qiancheng_db['qiancheng_corps']

# When non-empty, QIANCHENGWUYOU_POST.begin() crawls only the
# sub-industry kinds listed here; empty means "crawl everything".
subIndustrys = []


# Example: restrict the crawl to a few sub-industries and purge their
# previously scraped postings first.
# subIndustrys = ['产品', '技术', '会计', '教育', '法律']
#
# for indu in subIndustrys:
#     qiancheng_posts.delete_many({'subIndustry': indu})


class QIANCHENGWUYOU_POST():
    """Crawler for 51job (qianchengwuyou) job postings and company pages.

    Constructing an instance immediately starts a full crawl for one
    top-level *industry*: every keyword x city-group x company-size x
    company-type combination is searched, result pages are paginated,
    and job / company documents are upserted into the module-level
    MongoDB collections ``qiancheng_posts`` / ``qiancheng_corps``.

    NOTE(review): free names such as ``requests``, ``BeautifulSoup``,
    ``post_url``, ``post_headers``, ``post_params``, ``post_data``,
    ``jd_headers``, ``company_headers`` and ``keywords`` are expected to
    come from the star imports of ``config`` / ``qiancheng_config`` —
    confirm against those modules.
    """

    def __init__(self, industry):
        """Set up the search space for *industry* and start crawling.

        The constructor does not return until the whole crawl driven by
        ``login()`` -> ``begin()`` has finished.
        """
        self.industry = industry

        # City groups (51job numeric area codes), five cities per query.
        self.jobareas = [
            '080200,010000,020000,030200,040000',
            '180200,200200,070200,090200,060000',
            '030800,230300,230200,070300,250200',
            '190200,150200,080300,170200,050000',
            '110200,240200,220200,120200,120300'
        ]

        # Search keywords restricted to this industry only, taken from
        # the config-level ``keywords`` mapping.
        self.keywords = {}
        self.keywords[industry] = keywords[industry]

        # Company size filter: 5000+ employees (codes 05/06/07).
        self.companysize = ['05,06,07']

        # Company type filter codes.
        self.cotype = ['01,02,03,04,10']

        # Per-run bookkeeping of already-scraped jobs / companies,
        # used to avoid fetching the same detail page twice.
        self.jd_count = {}
        self.cd_count = {}

        self.login()

    def login(self):
        """Initialise the cookie jar and start the crawl.

        The real login request was disabled upstream; cookies returned
        by the search responses are accumulated into ``self.cookies``
        inside ``begin()`` instead.
        """
        self.cookies = dict()

        self.begin()

    def parse_job_list(self, soup, keyword, industry, kind):
        """Extract job rows from one search-result page and scrape each.

        Skips off-site postings and anything already recorded in
        ``self.jd_count`` / ``self.cd_count``.
        """
        jobs = soup.select('.dw_table .el .t1 span a')
        comps = soup.select('.dw_table .el .t2  a')
        # The first .t3/.t5 cell belongs to the table header row.
        areas = soup.select('.dw_table .el .t3')[1:]
        dates = soup.select('.dw_table .el .t5')[1:]

        # Empty result page: nothing to do.
        if len(jobs) == 0 or len(comps) == 0 or len(areas) == 0 or len(dates) == 0:
            return

        for job, comp, area, date in zip(jobs, comps, areas, dates):
            jd_url = job.get('href')
            cd_url = comp.get('href')

            # Skip postings hosted outside jobs.51job.com.
            if jd_url.find('jobs.51job.com') == -1:
                continue

            company = comp.get('title').strip()
            name = job.get('title').strip()
            location = area.text.strip()

            # Scrape and store the job; on first sight of the company,
            # scrape and store its profile as well.
            if company + name + location not in self.jd_count:
                self.scrapy_job_description(jd_url, keyword, industry, kind)

                if company not in self.cd_count:
                    self.scrapy_comp_description(cd_url, company)

                time.sleep(random.random())

    def begin(self):
        """Drive the crawl over every keyword/city/size/type combination."""
        for industry, v in self.keywords.items():
            for kind, config in v.items():
                # Optional sub-industry filter via module-level subIndustrys.
                if len(subIndustrys) and kind not in subIndustrys:
                    continue
                for keyword in config['relative']:
                    for jobarea in self.jobareas:
                        for companysize in self.companysize:
                            for cotype in self.cotype:
                                # First result page; 51job expects GBK-encoded keywords.
                                r = requests.post(post_url, headers = post_headers, params = {
                                    "fromJs": '1',
                                    "jobarea": jobarea,
                                    "keyword": keyword.encode('gbk'),
                                    "companysize": companysize,
                                    "cotype": cotype,
                                    "keywordtype": '2',
                                    "lang": 'c',
                                    "stype": '2',
                                    "postchannel": '0000',
                                    "fromType": '1',
                                    "confirmdate": '9'
                                }, data = {
                                    "lang": 'c',
                                    "stype": '2',
                                    "postchannel": '0000',
                                    "fromType": '1',
                                    "line": "",
                                    "confirmdate": '9',
                                    "from": "",
                                    "keywordtype": '2',
                                    "keyword": keyword.encode('gbk'),
                                    "jobarea": jobarea,
                                    "companysize": companysize,
                                    "cotype": cotype,
                                    "industrytype": ','.join(config['industrytype']) if len(config['industrytype']) else "",
                                    "funtype": ','.join(config['funtype']) if len(config['funtype']) else ''
                                }, cookies = self.cookies)

                                self.cookies.update(r.cookies)

                                # Pages are GBK content mislabelled as latin-1,
                                # so re-encode and decode accordingly.
                                soup = BeautifulSoup(r.text.encode("iso-8859-1").decode('gbk'))

                                self.parse_job_list(soup, keyword, industry, kind)

                                # Carry the hidden form fields over to the
                                # pagination requests (mutates the config-level
                                # post_data / post_params dicts in place).
                                for k in post_data:
                                    post_data[k] = soup.select('input[name="' + k + '"]')[0].get('value')

                                for k in post_params:
                                    post_params[k] = soup.select('input[name="' + k + '"]')[0].get('value')

                                curr_page = 2

                                while True:
                                    print('-' * 120)
                                    print('industry:', industry)
                                    print('kind:', kind)
                                    print('keyword:', keyword)
                                    print('curr_page:', curr_page)
                                    print('-' * 120)

                                    post_data['industrytype'] = ','.join(config['industrytype']) if len(config['industrytype']) else ""
                                    post_data['funtype'] = ','.join(config['funtype']) if len(config['funtype']) else ''

                                    post_params['keyword'] = keyword.encode('gbk')
                                    post_data['keyword'] = keyword.encode('gbk')

                                    post_params['curr_page'] = str(curr_page)
                                    post_data['curr_page'] = str(curr_page)

                                    post_data['companysize'] = companysize

                                    post_params['jobarea'] = jobarea
                                    post_data['jobarea'] = jobarea

                                    post_data['cotype'] = cotype

                                    time.sleep(random.random())

                                    r = requests.post(post_url, headers = post_headers, params = post_params, data = post_data, cookies = self.cookies)

                                    self.cookies.update(r.cookies)

                                    try:
                                        soup = BeautifulSoup(r.text.encode("iso-8859-1").decode('gbk'))
                                    except Exception:
                                        # NOTE(review): on a decode failure ``soup`` keeps
                                        # the previous page's content and is re-parsed
                                        # below — confirm whether skipping the page would
                                        # be preferable.
                                        print(curr_page, '编码错误!')

                                    self.parse_job_list(soup, keyword, industry, kind)

                                    # Total page count taken from the pager widget.
                                    pages = int(re.sub(r"\D", "", soup.find(class_ = 'p_in').find(class_ = 'td').text))

                                    curr_page += 1

                                    # Stop past the last page, with a hard cap of 100.
                                    if curr_page > pages or curr_page >= 100:
                                        print('访问完毕')
                                        break

                        time.sleep(random.random() * 10)

    def record(self, item):
        """Debug helper: pretty-print a scraped item."""
        pprint.pprint(item)

    def scrapy_job_description(self, url, keyword, industry, kind):
        """Fetch one job-detail page and upsert it into qiancheng_posts.

        Every field is scraped best-effort: a missing page element
        yields an empty default instead of aborting the whole posting.
        Records the job in ``self.jd_count`` on success.
        """
        # Only pages hosted on jobs.51job.com share this layout.
        if url.find('jobs.51job.com') == -1:
            return

        soup = None

        try:
            r = requests.get(url, headers = jd_headers, params = {'s': '01', 't': '0'})
            # GBK content mislabelled as latin-1.
            soup = BeautifulSoup(r.text.encode("iso-8859-1").decode('gbk'))
        except Exception:
            return

        if not soup:
            return None

        try:
            name = soup.find(class_ = 'tHjob').find('h1').text
        except Exception:
            name = ''

        try:
            company = soup.find(class_ = 'cname').find('a').text
        except Exception:
            company = ''

        try:
            location = soup.find(class_ = 'lname').text
        except Exception:
            location = ''

        # Raw salary string, computed once and reused both for parsing
        # and for the 'salary2' field below (the original re-ran the
        # lookup unguarded there and could crash on a missing element).
        try:
            salary_text = soup.find(class_ = 'lname').find_next_sibling().text.strip()
        except Exception:
            salary_text = ''

        # Normalise salary to a [low, high] range in CNY per month.
        try:
            salary = salary_text

            if salary.find('千/月') != -1 or salary.find('千以下/月') != -1 or salary.find('千以上/月') != -1:
                salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) * 1000)
            elif salary.find('万/月') != -1 or salary.find('万以上/月') != -1 or salary.find('万以下/月') != -1:
                salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) * 10000)
            elif salary.find('万/年') != -1 or salary.find('万以上/年') != -1 or salary.find('万以下/年') != -1:
                salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) / 12 * 10000)
            elif salary.find('元/天') != -1 or salary.find('元以上/天') != -1 or salary.find('元以下/天') != -1:
                # BUG FIX: the original parsed the hard-coded literal
                # '200-400/天' here instead of the scraped salary string.
                salary = list(np.array(re.match(r'[\d\.\-]+', salary).group().split('-')).astype(float) * 30)
            else:
                salary = []
        except Exception:
            salary = []

        try:
            description = soup.find(class_ = 'job_msg').text.strip()

            # Trim the trailing "job category" footer off the description.
            if description.find('职能类别') != -1:
                description = description[:description.find('职能类别')].strip()
        except Exception:
            description = ''

        # Required experience in years; '无' (none) maps to [].
        try:
            exp = soup.select('.jtag.inbox .t1 .i1')[0].parent.get_text()

            if exp.find('无') != -1:
                exp = []
            else:
                exp = re.findall(re.compile(r'\d+'), exp)
        except Exception:
            exp = []

        try:
            edu = soup.select('.jtag.inbox .t1 .i2')[0].parent.get_text()
        except Exception:
            edu = ''

        # Headcount; '' when missing (kept for backward compatibility
        # with existing documents, even though successes are ints).
        try:
            count = int(re.sub(r"\D", "", soup.select('.jtag.inbox .t1 .i3')[0].parent.get_text()))
        except Exception:
            count = ''

        try:
            date = soup.select('.jtag.inbox .t1 .i4')[0].parent.get_text().replace('发布', '')
        except Exception:
            date = ''

        try:
            lang = soup.select('.jtag.inbox .t1 .i5')[0].parent.get_text()
        except Exception:
            lang = ''

        try:
            major = soup.select('.jtag.inbox .t1 .i6')[0].parent.get_text()
        except Exception:
            major = ''

        try:
            welfare = soup.select('.jtag.inbox .t2')[0].get_text().strip().split('\n')
        except Exception:
            welfare = []

        try:
            funType = soup.find(text = re.compile('职能类别')).parent.find_next_sibling().text
        except Exception:
            funType = ''

        try:
            place = soup.find(text = re.compile('上班地址')).parent.parent.get_text().strip().split('：')[1]
        except Exception:
            place = ''

        # Keywords = the search keyword plus any keywords listed on the page.
        keywords = [keyword]

        try:
            keywords += soup.find(class_ = 'job_msg').find(text = re.compile('关键字：')).parent.parent.get_text().strip().replace('关键字：', '').split()
            keywords = list(set(keywords))
        except Exception:
            pass

        # A posting without a description is useless — skip storing it.
        if description == '':
            return None

        item = {
            'url': url,
            'edu': edu,
            'exp': exp,
            'name': name,
            'date': date,
            'lang': lang,
            'place': place,
            'major': major,
            'count': count,
            'salary': salary,
            'toSchool': False,
            'welfare': welfare,
            'funType': funType,
            'company': company,
            'location': location,
            'keywords': keywords,
            'platform': 'qiancheng',
            'description': description,
            'stime': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'industry': industry,
            'subIndustry': kind,
            'searchKeyword': keyword,
            'salary2': salary_text
        }

        # Upsert: replace the existing document for the same
        # (company, name, location) triple, otherwise insert.
        result = qiancheng_posts.replace_one({'company': company, 'name': name, 'location': location}, item, True)

        pprint.pprint(item)

        # A matched document means we updated an existing posting.
        if result.matched_count:
            print('-' * 40, 'update one job', '-' * 40)
        else:
            print('-' * 40, 'insert one job', '-' * 40)

        self.jd_count[company + name + location] = True

    def scrapy_comp_description(self, url, name):
        """Fetch one company page and insert it into qiancheng_corps.

        Skips companies already stored in MongoDB or hosted off-site.
        Records the company in ``self.cd_count`` on success.
        """
        # Already scraped in a previous run?
        if qiancheng_corps.find_one({'name': name}):
            return

        if url.find('jobs.51job.com') == -1:
            return

        try:
            r = requests.post(url, headers = company_headers, timeout = 5)
        except Exception:
            print('企业信息爬取有误，请检查！')
            return

        soup = None

        try:
            # GBK content mislabelled as latin-1.
            soup = BeautifulSoup(r.text.encode("iso-8859-1").decode('gbk'))
        except Exception:
            pass

        if not soup:
            return None

        try:
            name = soup.find('h1').text
        except Exception:
            name = ''

        # The .ltype header packs "type | size | industry" into one string.
        try:
            corp_type = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[0]
        except Exception:
            corp_type = ''

        try:
            size = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[2]
        except Exception:
            size = ''

        try:
            industry = soup.find(class_ = 'tHeader').find(class_ = 'ltype').text.split()[4]
        except Exception:
            industry = ''

        try:
            description = soup.find(class_ = 'con_msg').find(class_ = 'in').find('p').text.strip()
        except Exception:
            description = ''

        try:
            location = soup.find(text = re.compile('公司地址')).parent.parent.get_text().replace('公司地址：', '').strip()
        except Exception:
            location = ''

        item = {
            'name': name,
            'type': corp_type,
            'url': url,
            'size': size,
            'industry': industry,
            'location': location,
            'platform': 'qiancheng',
            'description': description,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        # Re-check right before inserting (the page's <h1> may have
        # changed ``name`` since the check at the top).
        if not qiancheng_corps.find_one({'name': name}):
            result = qiancheng_corps.insert_one(item)

            if result:
                self.cd_count[name] = True
                pprint.pprint(item)

                print('-' * 40, 'insert one company', '-' * 40)

    def scrapy_hot_city_list(self):
        """Dump the hot-city names from the 51job index to ../hot_city.txt."""
        url = 'http://jobs.51job.com/'

        r = requests.get(url)

        # GBK content mislabelled as latin-1.
        page_source = r.text.encode("iso-8859-1").decode('gbk')
        soup = BeautifulSoup(page_source)

        with open('../hot_city.txt', 'w') as f:
            for e in soup.find(class_ = 'filter').select('.e.e4')[0].select('.lkst a'):
                print(e.text)
                f.write(e.text + '\n')

    def scrapy_main_job_name_list(self):
        """Dump the main job-category names from 51job to ../main_jobs.txt."""
        url = 'http://jobs.51job.com/'

        r = requests.get(url)

        # GBK content mislabelled as latin-1.
        page_source = r.text.encode("iso-8859-1").decode('gbk')
        soup = BeautifulSoup(page_source)

        with open('../main_jobs.txt', 'w') as f:
            for e in soup.select('.filter .e.e5 .lkst a'):
                # The catch-all "other" category is skipped.
                if e.text == '其他':
                    continue

                f.write(e.text + '\n')


def scrapy(industry):
    # Worker-process entry point: constructing the scraper starts the
    # crawl for *industry* immediately (QIANCHENGWUYOU_POST.__init__
    # calls login() -> begin()).
    QIANCHENGWUYOU_POST(industry)


if __name__ == '__main__':
    # Top-level industries to crawl; one worker process is spawned per
    # industry via the process pool.
    info = [
        '金融行业', '快消房产行业', 'IT互联科技行业', '咨询行业', '制造行业'
    ]

    p = Pool()

    # Iterate the values directly instead of indexing with range(len(...)).
    for industry in info:
        p.apply_async(scrapy, args = (industry,))

    # No more tasks; wait for all workers to finish.
    p.close()
    p.join()
