from bs4 import BeautifulSoup

class HtmlParser(object):
    """Parse policy listing and article pages from www.chinajob.gov.cn.

    Typical flow: call ``get_all_state`` on a listing page to collect
    article URLs (and record titles in ``self.Policy_Names``), then call
    ``parse`` on each article page; ``parse`` returns a dict with keys
    ``Title``/``Time``/``Content``/``Type``.
    """

    # Site root, used both to absolutize relative links and to slice the
    # policy type out of an article URL (replaces the magic offset 27).
    BASE_URL = 'http://www.chinajob.gov.cn/'

    def __init__(self):
        self.Add_Times = ''       # publication time of the last parsed article
        self.Policy_Names = []    # titles accumulated by get_all_state()
        self.Policy_Content = ''  # body text of the last parsed article
        self.Type = ''            # policy type of the last parsed article
        self.all_data = {}        # most recent result returned by parse()

    def _get_new_data(self, url, soup, i):
        """Extract title/time/content/type from one parsed article page.

        :param url:  absolute article URL (used to derive the policy type)
        :param soup: BeautifulSoup tree of the article page
        :param i:    index into ``self.Policy_Names`` for this article's title
        :return:     dict with keys ``Title``, ``Time``, ``Content``, ``Type``
        """
        self.Policy_Content = soup.find('div', id="content_content").get_text()
        # The "time" span carries a 5-character label prefix (e.g.
        # "发布时间：...") which is sliced off before the date itself.
        self.Add_Times = soup.find('span', class_='time').get_text()[5:]
        # Policy type is the first path segment after the site root.
        start = len(self.BASE_URL)
        self.Type = url[start:url.find('/', start)]

        # Build a fresh dict every call: the original reused one shared
        # dict, so earlier returned results were overwritten by later parses.
        self.all_data = {
            'Title': self.Policy_Names[i],
            'Time': self.Add_Times,
            'Content': self.Policy_Content,
            'Type': self.Type,
        }
        return self.all_data

    def parse(self, page_url, html_cont, i):
        """Parse one article page.

        :param page_url:  absolute URL of the article
        :param html_cont: raw HTML (str or bytes) of the article page
        :param i:         title index, forwarded to ``_get_new_data``
        :return:          result dict, or ``None`` when either input is missing
        """
        if page_url is None or html_cont is None:
            return None
        # html.parser backend; from_encoding only matters for bytes input.
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding="utf-8")
        return self._get_new_data(page_url, soup, i)

    def get_all_state(self, root_html_cont):
        """Collect article links and titles from a listing page.

        Each title is appended to ``self.Policy_Names`` exactly once.
        (The original extended the whole running title list inside the
        loop, duplicating earlier titles quadratically and corrupting
        the index-based title lookup in ``_get_new_data``.)

        :param root_html_cont: raw HTML of the listing page
        :return: list of absolute article URLs, in page order
        """
        root_links = []
        soup = BeautifulSoup(root_html_cont, 'html.parser', from_encoding="utf-8")

        for item in soup.find_all('li', class_="news-li"):
            anchor = item.find('a')
            link = anchor['href']
            title = anchor['title']
            # Absolutize the two relative-link shapes the site emits.
            if self.BASE_URL not in link:
                if link.startswith('../'):
                    link = self.BASE_URL + link[3:]
                elif link.startswith('content'):
                    link = self.BASE_URL + 'EmploymentServices/' + link
            root_links.append(link)
            self.Policy_Names.append(title)

        return root_links
