# -*- coding:utf-8 -*-
import urlparse,re,os
from bs4 import BeautifulSoup
import funcs
class Item(object):
    """Plain attribute container for per-page parse state.

    Parse.parse() attaches: url, content (serialized markup),
    all_hrefs (collected link values), file_length (original text length).
    """
    pass
S_root_dir='D:/tmp/media/html/delta/csdn'    # input root: mirrored HTML pages to rewrite
S_save_root_dir='D:/tmp1'                    # output root: rewritten pages are saved under here
S_delta_url_prefix='/media/html/delta'       # URL prefix of raw mirrored pages (not referenced in this file's visible code)
S_main_url_prefix='/media/html/template'     # local template prefix (Parse keeps its own copy in url_prefix_main)
class Parse(object):
    """Rewrite one mirrored HTML page so its anchor/css/js/img links point at
    local mirror paths, then save the result under S_save_root_dir.

    Typical use: Parse().parse(url, path).  Per-page state accumulates on
    self.item (url, content, all_hrefs, file_length).
    """
    def __init__(self):
        self.item = Item()                              # per-page scratch state
        self.url_prefix_main = '/media/html/template'   # local prefix rewritten links point at

    def on_get_link(self, soup, field, href):
        """Return the non-empty, stripped values of attribute `href` on every
        `field` tag in `soup` (e.g. every <a href=...>)."""
        urls = []
        for tag in soup.find_all(field):
            value = tag.get(href, '').strip()
            if value:
                urls.append(value)
        return urls

    def get_link(self, soup):
        """Collect (anchor, css, js, img) link-value lists from the document."""
        urls = self.on_get_link(soup, 'a', 'href')
        css_urls = self.on_get_link(soup, 'link', 'href')
        js_urls = self.on_get_link(soup, 'script', 'src')
        img_urls = self.on_get_link(soup, 'img', 'src')
        return urls, css_urls, js_urls, img_urls

    def _find_elements(self, soup, field, _class, _id):
        """Select `field` tags filtered by exact class or id.

        Class takes precedence over id when both are given, matching the
        original decompose_element/clear_element branch order."""
        if _class:
            return soup.select('%s[class="%s"]' % (field, _class))
        if _id:
            return soup.select('%s[id="%s"]' % (field, _id))
        return soup.find_all(field)

    def decompose_element(self, soup, field, _class='', _id=''):
        """Remove matching elements (tags and their content) from the tree."""
        for element in self._find_elements(soup, field, _class, _id):
            element.decompose()
        return soup

    def clear_element(self, soup, field, _class='', _id=''):
        """Empty matching elements but keep the tags themselves in place."""
        for element in self._find_elements(soup, field, _class, _id):
            element.clear()
        return soup

    def both_need_clear(self, soup):
        """Cleanup shared by every site: drop all <script> elements."""
        return self.decompose_element(soup, 'script')

    def csdn_clear(self, soup):
        """CSDN-specific cleanup."""
        soup = self.clear_element(soup, 'div', _class='nav')           # top nav bar
        soup = self.decompose_element(soup, 'script', _id='frm_tt1')   # top script
        soup = self.clear_element(soup, 'table', _class='comt')        # comment box at the bottom
        return soup

    def iteye_clear(self, soup):
        """iteye needs no site-specific cleanup (placeholder)."""
        return soup

    def check_dir_path(self, dir_path):
        """Ensure dir_path exists as a directory; True on success, False otherwise."""
        if os.path.isdir(dir_path):
            return True
        try:
            os.makedirs(dir_path)
            return True
        except OSError:
            # Was a bare except:, which also swallowed SystemExit/KeyboardInterrupt;
            # only filesystem failures from makedirs are expected here.
            return False

    def parse(self, url, path):
        """Rewrite the page stored at `path` (originally fetched from `url`)
        and save it under S_save_root_dir, mirroring the /media/html/ layout.
        Prints a diagnostic and returns without saving when `path` does not
        contain the /media/html/ marker."""
        with open(path) as f:
            data = f.read()

        data = data.decode('utf-8')
        # Pass the unicode text straight to BeautifulSoup: the original
        # str(data) re-encoded with ASCII and crashed on any non-ASCII page.
        # (from_encoding is meaningless for unicode input and is dropped.)
        soup = BeautifulSoup(data, 'html5lib')
        urls, css_urls, js_urls, img_urls = self.get_link(soup)

        self.item.url = url
        self.item.content = str(soup)                    # serialized, rewritten below
        self.item.all_hrefs = css_urls + js_urls + urls + img_urls
        self.item.file_length = len(data)                # original document length

        self.update_css_js()
        content = str(self.item.content)

        m = re.search(r'[\s\S]+?(/media/html/[\s\S]+)', path)
        if not m:
            # Original fell through with save_path='' and crashed on open().
            print(path + ' is a wrong path')
            return
        save_path = S_save_root_dir + m.group(1)

        self.check_dir_path(os.path.dirname(save_path))
        with open(save_path, 'w') as f:
            f.write(content)

    def _local_sub_url(self, c_href):
        """Path component of an absolute URL, with a leading '/..' collapsed
        so http://host/../x maps to /x."""
        sub_url = urlparse.urlparse(c_href)[2]
        if sub_url.startswith('/../'):
            sub_url = sub_url[3:]
        return sub_url

    def update_css_js(self):
        """Rewrite every collected href/src inside self.item.content to its
        local mirror location; returns self.item."""
        for href in self.item.all_hrefs:
            turl = href.split('#')[0].strip()            # drop anchors
            if turl == '' or turl.startswith('javascript') or turl.startswith('#'):
                continue

            ext = funcs.get_url_ext(href).lower()
            c_href = urlparse.urljoin(self.item.url, href)
            if ext in ('css', 'js'):
                # css/js live together under <prefix>/css_js/<path>
                c_href = self.url_prefix_main + '/css_js' + self._local_sub_url(c_href)
                patt_prefix = 'href'
            elif ext in ('jpg', 'png', 'gif', 'jpeg'):
                # images keep their domain: <prefix>/<domain>/<path>
                domain_name = funcs.url2domain(c_href)
                c_href = self.url_prefix_main + '/' + domain_name + self._local_sub_url(c_href)
                patt_prefix = 'src'
            else:
                # page links: <prefix>/<domain>/<dir>/<md5-of-url>.html
                if len(re.findall('/', c_href)) == 2:    # bare http://host -> http://host/
                    c_href = c_href + '/'
                domain_name = funcs.url2domain(c_href)
                c_idx = funcs.get_md52int(c_href)
                c_href = self.url_prefix_main + '/' + domain_name + urlparse.urlparse(c_href)[2]
                c_href = os.path.dirname(c_href) + '/' + str(c_idx) + '.html'
                patt_prefix = 'href'
            try:
                p_href = funcs.get_re_patt(href)
                patt = patt_prefix + '=[\'"]' + p_href + '[\'"]'
                self.item.content = re.sub(patt, patt_prefix + '="' + c_href + '"', self.item.content)
            except Exception as e:
                print('update_css_js:' + str(e))
        return self.item
    
def get_file_path(root_dir):
    """Walk root_dir recursively, yielding the path of every .html file."""
    for dirpath, _subdirs, filenames in os.walk(root_dir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            if os.path.splitext(full_path)[1] == '.html':
                yield full_path
                
if __name__ == '__main__':
    # Rewrite every mirrored .html page; the part of the path below
    # S_root_dir doubles as the page's original host/path for the URL.
    # A plain for loop replaces the Py2-only .next()/StopIteration dance,
    # and the pattern is compiled (and escaped) once instead of per file.
    url_patt = re.compile(re.escape(S_root_dir) + r'/([\s\S]+)')
    for path in get_file_path(S_root_dir):
        path = path.replace('\\', '/').strip()
        m = url_patt.search(path)
        if m:
            url = 'http://' + m.group(1)
            p = Parse()
            p.parse(url, path)
    print('end')





























