#coding:utf-8

from bs4 import BeautifulSoup
import re
import urllib.parse as urlparse
class HtmlParser(object):
    """Parses 51job listing pages and job-detail pages.

    ``get_all_state`` is called once per listing page and appends job
    metadata (title, company, place, pay, post date) to parallel
    instance-level lists.  ``parse`` / ``_get_new_data`` are then called
    once per detail page and pair the detail fields with the listing
    metadata via the running cursor ``self.i``.
    """

    def __init__(self):
        # Parallel lists filled by get_all_state(); all indexed by self.i.
        self.Add_Times = []       # posting dates
        self.Job_Pays = []        # salaries
        self.Job_Places = []      # work locations
        self.Company_Names = []   # company names
        self.Job_Titles = []      # job titles

        self.all_data = []  # currently unused (records are saved one by one)

        self.i = 0  # cursor: index of the next detail page's listing metadata

    def _get_new_data(self, page_url, soup):
        """Extract one job record from a detail-page soup.

        Args:
            page_url: URL of the detail page (kept for interface
                compatibility; not used by the extraction itself).
            soup: BeautifulSoup tree of the detail page.

        Returns:
            Tuple ``(res_data, thisStrSum)`` where ``res_data`` maps field
            names to strings and ``thisStrSum`` is the combined length of
            all field values (used by callers as a rough size metric).
        """
        res_data = {}

        # The "msg ltype" paragraph interleaves separators and values; the
        # tokens at odd 1-based positions (0-based even) are the values.
        msg_node = soup.find('p', class_="msg ltype")
        msg_text = msg_node.get_text() if msg_node is not None else ''
        data_span = [str(tok) for idx, tok in enumerate(msg_text.split())
                     if idx % 2 == 0]

        # BUG FIX: the original guard was ``>= 3`` yet read data_span[3],
        # which raised IndexError when exactly three tokens were present.
        if len(data_span) >= 4:
            res_data['Work_Experience'] = data_span[1]
            res_data['Academic'] = data_span[2]
            res_data['Num_People'] = data_span[3]
        else:
            res_data['Work_Experience'] = ''
            res_data['Academic'] = ''
            res_data['Num_People'] = ''

        # Benefits: '$'-delimited string, e.g. "$a$b$"; '' when absent.
        datas_boon = soup.find('div', class_="t1")
        if datas_boon is None:
            res_data['Job_Boon'] = ''
        else:
            boons = '$'
            for boon_span in datas_boon.find_all('span'):
                boons += boon_span.get_text() + '$'
            res_data['Job_Boon'] = boons

        # Company type is the text before the first '|' separator.
        sep = msg_text.find('|')
        res_data['Company_Type'] = msg_text[:sep].strip()

        # Job category: title attribute of the third "p.at" node.
        # find_all() returns a (possibly empty) list, never None, so only a
        # length check is needed; ``or ''`` guards a missing title attribute
        # (None would crash the length sum below).
        datas_span_type = soup.find_all('p', class_="at")
        if len(datas_span_type) >= 3:
            res_data['Job_Type'] = datas_span_type[2].get('title') or ''
        else:
            res_data['Job_Type'] = ''

        # Duties / requirements: the page has no reliable marker separating
        # them, so the description text is simply split in half.
        dr_node = soup.find('div', class_="bmsg job_msg inbox")
        dr_text = dr_node.get_text() if dr_node is not None else ''
        half = len(dr_text) // 2
        res_data['Job_Duties'] = dr_text[:half]
        res_data['Job_Requirement'] = dr_text[half:]

        # Pair with the metadata captured earlier from the listing page.
        res_data['Job_Title'] = self.Job_Titles[self.i]
        res_data['Job_Place'] = self.Job_Places[self.i]
        res_data['Add_Time'] = self.Add_Times[self.i]
        res_data['Recruit_Way'] = '社会招聘'
        res_data['Job_Pay'] = self.Job_Pays[self.i]
        res_data['Company_Name'] = self.Company_Names[self.i]
        res_data['Num'] = str(self.i)
        self.i += 1

        thisStrSum = sum(len(value) for value in res_data.values())

        return res_data, thisStrSum

    def parse(self, page_url, html_cont):
        """Parse one detail page.

        Args:
            page_url: URL of the detail page.
            html_cont: raw HTML of the detail page.

        Returns:
            ``(record_dict, total_string_length)``, or ``None`` when either
            argument is missing.
        """
        if page_url is None or html_cont is None:
            return None
        # HTML string, parser name, and source encoding of the document.
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding="utf-8")
        return self._get_new_data(page_url, soup)

    def get_all_state(self, root_html_cont):
        """Scrape a listing page.

        Appends each row's title/company/place/pay/date to the instance
        lists and returns the detail-page URLs found on the page.

        Args:
            root_html_cont: raw HTML of the listing page.

        Returns:
            List of detail-page URLs (one per "p.t1" row).
        """
        soup = BeautifulSoup(root_html_cont, 'html.parser',
                             from_encoding="utf-8")

        # Each "p.t1" node carries both the detail link and the job title.
        link_and_title = soup.find_all('p', class_="t1")
        root_links = [node.find('a')['href'] for node in link_and_title]
        self.Job_Titles.extend(node.get_text().strip()
                               for node in link_and_title)

        # Columns t2..t5: company, place, pay, post date.  The first node of
        # each column is the table header row; slicing [1:] skips it and —
        # unlike the original ``del lst[0]`` — does not raise IndexError
        # when the page yields no rows.
        self.Company_Names.extend(
            n.get_text() for n in soup.find_all('span', class_="t2")[1:])
        self.Job_Places.extend(
            n.get_text() for n in soup.find_all('span', class_="t3")[1:])
        self.Job_Pays.extend(
            n.get_text() for n in soup.find_all('span', class_="t4")[1:])
        self.Add_Times.extend(
            n.get_text() for n in soup.find_all('span', class_="t5")[1:])

        return root_links

