#coding:utf-8

from bs4 import BeautifulSoup
import re
import urllib.parse as urlparse
class HtmlParser(object):
    """Parse Xiaomi campus-recruitment listing pages and job-detail pages."""

    def __init__(self):
        self.Add_Times = []  # publish times harvested from listing pages
        self.all_data = []   # accumulated job records, one dict per detail page
        self.i = 0           # index of the next Add_Time to pair with a page

    def _get_new_data(self, page_url, soup):
        """Extract one job record from a detail-page soup.

        Appends the record to self.all_data and returns the whole list.
        """
        res_data_name = ['Job_Title', 'Job_Place', 'Job_Type', 'Recruit_Way']
        # <td class="job-details"> cells carry title/place/type/way in order.
        res_data_value = [td.get_text().strip()
                          for td in soup.find_all('td', class_="job-details")]
        # zip stops at the shorter sequence, matching the old dict(map(...)).
        res_data = dict(zip(res_data_name, res_data_value))

        # <td class="details-list"> cells carry duties then requirements.
        details = [td.get_text().strip()
                   for td in soup.find_all('td', class_="details-list")]
        # Escape '%' so the text is safe for later %-style formatting;
        # default to '' instead of IndexError on malformed pages.
        res_data['Job_Duties'] = details[0].replace('%', '%%') if len(details) > 0 else ''
        res_data['Job_Requirement'] = details[1].replace('%', '%%') if len(details) > 1 else ''

        # Pair this detail page with the publish time collected from the
        # listing page; guard against running past the collected list.
        res_data['Add_Time'] = self.Add_Times[self.i] if self.i < len(self.Add_Times) else ''
        self.i += 1

        self.all_data.append(res_data)
        return self.all_data

    def parse(self, page_url, html_cont):
        """Parse one detail page.

        Returns the accumulated data list, or None when either argument
        is missing.
        """
        if page_url is None or html_cont is None:
            return None
        # HTML document string, HTML parser, document encoding hint.
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding="utf-8")
        return self._get_new_data(page_url, soup)

    def get_all_state(self, root_html_cont):
        """Collect detail-page links and publish times from a listing page.

        Example listing URL: http://hr.xiaomi.com/campus/list/10-0-0
        Extends self.Add_Times with the page's publish times and returns
        the list of hrefs whose path contains '/view/'.
        """
        soup = BeautifulSoup(root_html_cont, 'html.parser', from_encoding="utf-8")
        # links look like <a href="http://hr.xiaomi.com/campus/view/675">...</a>
        links = soup.find_all('a', href=re.compile(r"/view/"))

        # Every 4th <td> (offset 3) of the listing table is the Add_Time
        # column; slice once, instead of re-slicing inside a loop.
        cell_texts = [td.get_text() for td in soup.find_all('td')]
        self.Add_Times.extend(cell_texts[3::4])

        return [link['href'] for link in links]

