#coding:utf-8
import time
from bs4 import BeautifulSoup
import re
import urllib.parse as urlparse
class HtmlParser(object):
    """Parses Lagou job pages.

    ``get_1_message`` walks a job-list page, stores the per-job summary
    columns (salary, experience, education, ...) on the instance and
    returns the detail-page links; ``parse`` then reads one detail page
    and merges it with the stored summary columns (selected by index
    ``ra``) into a single record dict.
    """

    def __init__(self):
        self.all_data = []
        self.i = 0
        self.ra = 0
        # Summary columns collected from list pages.  They are parallel
        # lists: parse() indexes all of them with the same ``ra``.
        self.jobSalary = []      # salary
        self.jobExperience = []  # required work experience
        self.jobEducation = []   # required education
        self.companyName = []    # company name
        self.jobType = []        # company industry / job type
        self.jobPubTime = []     # publish time
        self.jobBenefits = []    # benefits line
        self.jobKeyWord = []     # keyword line

        self.Null_page = []      # str(page_url) of pages that failed to load

        self.root_1_links = []   # every detail-page link collected so far

    def parse(self, page_url, html_cont, ra):
        """Parse one job-detail page.

        Returns a record dict combining the detail-page fields with the
        list-page columns stored at index ``ra`` — get_1_message() must
        have populated them first, otherwise the lookups raise IndexError.
        Returns None (and records the url in ``self.Null_page``) when the
        page is missing.
        """
        if page_url is None or html_cont is None:
            self.Null_page.append(str(page_url))
            return
        # HTML document string, HTML parser, document encoding.
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding="utf-8")

        sub_jobTitle = soup.find('span', class_="name").get_text().strip().replace("\n", "")  # job title
        sub_jobPlace = soup.find('div', class_="work_addr").get_text().strip().replace("\n", "").replace(" ", "").replace("　", "")  # job place
        sub_companyManyLink = soup.find('ul', class_="c_feature").find('a')['href']
        sub_jobDescription = soup.find('dd', class_="job_bt").get_text()
        sub_Take_Time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))

        self.test_exhibit(sub_jobTitle, sub_jobPlace, sub_companyManyLink, sub_Take_Time)

        return {
            'jobTitle': sub_jobTitle,
            'jobPlace': sub_jobPlace,
            'companyManyLink': sub_companyManyLink,
            'jobDescription': sub_jobDescription,
            'Take_Time': sub_Take_Time,
            'jobSalary': self.jobSalary[ra],
            'jobExperience': self.jobExperience[ra],
            'jobEducation': self.jobEducation[ra],
            'companyName': self.companyName[ra],
            'jobType': self.jobType[ra],
            'jobPubTime': self.jobPubTime[ra],
            'jobBenefits': self.jobBenefits[ra],
            'jobKeyWord': self.jobKeyWord[ra],
            'Word_Num': len(sub_jobDescription),
        }

    def get_1_message(self, root_html_cont):
        """Parse one job-list page (e.g. https://www.lagou.com/zhaopin/2/?filterOption=3).

        Extends the instance's parallel summary columns and
        ``self.root_1_links``, and returns the list of detail-page links
        found on this page.
        """
        sub_jobExperience = []
        sub_jobEducation = []
        sub_jobKeyWord = []

        soup = BeautifulSoup(root_html_cont, 'html.parser', from_encoding="utf-8")
        # Links to the individual job-detail pages.
        links = soup.find('div', id='s_position_list').find_all('a', href=re.compile(r"/jobs/"))

        # Plain text columns of the list page.
        sub_jobSalary = self.loop_getContent(soup.find_all('span', class_='money'))
        combine_two = soup.find_all('div', class_='li_b_l')
        sub_companyName = self.loop_getContent(soup.find_all('div', class_='company_name'))
        sub_jobType = self.loop_getContent(soup.find_all('div', class_='industry'))
        sub_jobPubTime = self.loop_getContent(soup.find_all('span', class_='format-time'))
        sub_jobBenefits = self.loop_getContent(soup.find_all('div', class_='li_b_r'))

        # The 'li_b_l' divs alternate: even entries hold the
        # "experience / education" line, odd entries hold the keyword line.
        for idx, one in enumerate(combine_two):
            text = one.get_text().strip().replace("\n", "")
            if idx % 2 == 0:
                if ' / ' in text:
                    parts = text.split(' / ')
                    # BUGFIX: with more than two segments the original
                    # appended nothing, desynchronising the parallel lists
                    # that parse() indexes; take first and last instead.
                    sub_jobExperience.append(parts[0])
                    sub_jobEducation.append(parts[-1])
                else:
                    # BUGFIX: the original single-segment branch called
                    # .find() on an int (AttributeError) and was unreachable
                    # anyway; fall back to the placeholders it intended.
                    sub_jobExperience.append("unknow")
                    sub_jobEducation.append("unknow")
            else:
                # BUGFIX: the original computed the newline-free text and
                # then clobbered it with the raw text; keep the cleaned
                # form, consistent with every other column.  Also fixes
                # the "unkonw" misspelling of the placeholder.
                sub_jobKeyWord.append(text if text else "unknow")

        # ``links`` is a list of tags; pull the href out of each one.
        sub_root_1_links = [link['href'] for link in links]

        self.root_1_links.extend(sub_root_1_links)

        self.jobSalary.extend(sub_jobSalary)
        self.jobEducation.extend(sub_jobEducation)
        self.jobExperience.extend(sub_jobExperience)
        self.companyName.extend(sub_companyName)
        self.jobType.extend(sub_jobType)
        self.jobPubTime.extend(sub_jobPubTime)
        self.jobKeyWord.extend(sub_jobKeyWord)
        self.jobBenefits.extend(sub_jobBenefits)
        print("小测试", sub_root_1_links)

        self.test_exhibit(sub_jobSalary, sub_jobExperience, sub_jobEducation,
                          sub_companyName, sub_jobType, sub_jobPubTime,
                          sub_jobKeyWord, sub_jobBenefits)
        return sub_root_1_links

    def loop_getContent(self, ta):
        """Return the stripped, newline-free text of each tag in ``ta``;
        None entries become the "unknow" placeholder."""
        return [t.get_text().strip().replace("\n", "") if t is not None else "unknow"
                for t in ta]

    def test_exhibit(self, *show):
        """Debug helper: print each sequence's length and content."""
        for s in show:
            print(len(s), s)

    # Abandoned temporarily: superseded by get_1_message().
    def from_1_get_2(self, root_html_cont):
        """Return all job-detail links found on one list page."""
        soup = BeautifulSoup(root_html_cont, 'html.parser', from_encoding="utf-8")
        links = soup.find('div', id='s_position_list').find_all('a', href=re.compile(r"/jobs/"))
        root_1_links = [link['href'] for link in links]
        print("小测试", root_1_links)

        return root_1_links

