from bs4 import BeautifulSoup
import requests,re

class UrlAnalysis(object):
    """Crawl a page for links and save pages / extracted text from Baidu Tieba.

    ``_get_new_urls`` harvests every ``<a href>`` on a page into
    ``self.page_url`` as ``[absolute_url, num]`` pairs; the two download
    methods save a page as ``<title>.html`` or its visible text as
    ``<title>.txt``.
    """

    # Kept at class level for backward compatibility with code that reads
    # UrlAnalysis.page_url / UrlAnalysis.temp_url directly; __init__ shadows
    # them with per-instance lists so instances no longer share state.
    page_url = []
    temp_url = []

    def __init__(self):
        # NOTE(review): attribute name 'sever' (sic, presumably 'server')
        # is preserved because external code may reference it.
        self.sever = 'https://tieba.baidu.com'
        # Per-instance lists: the original class-level lists were mutable
        # and shared by every instance, so two crawlers polluted each other.
        self.page_url = []
        self.temp_url = []

    def _get_new_urls(self, url, num, timeout=10):
        """Collect all anchor links from *url* into ``self.page_url``.

        Each collected entry is a two-item list ``[absolute_url, num]``,
        matching the original ``temp_url`` copy/clear dance.

        :param url: page to fetch.
        :param num: tag value stored alongside every harvested link.
        :param timeout: request timeout in seconds (the original hard-coded
            0.001 s, which made virtually every request time out).
        :returns: 1 on success, 0 on any network failure, or the original
            error string when the server answers with a non-200 status.
        """
        try:
            req = requests.get(url, timeout=timeout)
            if req.status_code != 200:
                return '此网页错误'
            # Decode with the sniffed encoding instead of the fragile
            # encode()/decode() round-trip, which raised when req.encoding
            # was None or could not represent the text.
            req.encoding = req.apparent_encoding
            soup = BeautifulSoup(req.text, 'html.parser')
            for anchor in soup.find_all('a'):
                href = anchor.get('href')
                if href is None:
                    # <a> without href crashed the original with
                    # AttributeError (silenced by its bare except).
                    continue
                if href.find('http') == -1:
                    # Relative link: prefix with the site root.
                    href = self.sever + href
                self.page_url.append([href, num])
        except requests.RequestException:
            # Network-level failure (timeout, DNS, refused connection).
            return 0
        return 1

    def _download_html(self, url, timeout=10):
        """Download *url* and save the raw HTML as ``<page title>.html``.

        Best-effort, like the original: any network or file error is
        swallowed and the method always returns None.
        """
        try:
            req = requests.get(url, timeout=timeout)
            if req.status_code != 200:
                return
            req.encoding = req.apparent_encoding
            html = req.text
            soup = BeautifulSoup(html, 'html.parser')
            title_tag = soup.find('title')
            if title_tag is None:
                # No <title>: the original raised IndexError into its
                # bare except; we bail out explicitly instead.
                return
            # Strip characters that are illegal in filenames on common OSes.
            name = re.sub(r'[\\/:*?"<>|]', '_', title_tag.get_text()) + '.html'
            with open(name, 'w', encoding=req.apparent_encoding) as fh:
                fh.write(html)
            print('title:%s网页下载完成' % name)
        except (requests.RequestException, OSError):
            # Deliberate best-effort download; keep the silent-skip contract.
            pass

    def _download_chinese_characters(self, url, timeout=10):
        """Download *url*, strip markup, and save the visible body text
        as ``<page title>.txt`` (UTF-8).

        Best-effort, like the original: failures are swallowed and the
        method always returns None.
        """
        try:
            req = requests.get(url, timeout=timeout)
            if req.status_code != 200:
                return
            req.encoding = req.apparent_encoding
            soup = BeautifulSoup(req.text, 'html.parser')
            # Actually remove <script>/<style> nodes from the tree. The
            # original built str(...).replace(...) expressions and discarded
            # the results (a no-op), so script/style text leaked into the
            # "plain text" output.
            for tag in soup.find_all(['script', 'style']):
                tag.decompose()
            title_tag = soup.find('title')
            body_tag = soup.find('body')
            if title_tag is None or body_tag is None:
                return
            name = re.sub(r'[\\/:*?"<>|]', '_', title_tag.get_text()) + '.txt'
            with open(name, 'w', encoding='utf-8') as fh:
                fh.write(body_tag.get_text())
            print('title:%s文本下载完成' % name)
        except (requests.RequestException, OSError):
            pass
        

