# -*- coding:utf-8 -*-
import os,urlparse,datetime,re,random

import settings
import funcs

class Pipeline(object):
    '''Post-download pipeline: rewrite a page's links to point at the local
    mirror, record/update its row in the database, and save it to disk.

    cur/cxn are a shared DB-API cursor/connection pair; thread_id is used
    only to tag log output.
    '''
    def __init__(self,cur,cxn,thread_id):
        # Prefix used when rewriting links inside saved pages (main mirror)
        # and prefix used when computing on-disk paths (delta area).
        self.url_prefix_main=settings.S_main_url_prefix+'/'+settings.S_target_website
        self.url_prefix_delta=settings.S_delta_url_prefix+'/'+settings.S_target_website
        self.cur=cur
        self.cxn=cxn
        self.thread_id=thread_id

    def pipeline(self,item):
        '''Rewrite the page's links and save the page to disk.'''
        if item.url=='':
            return
        root_dir=settings.S_root_dir
        sub_dir=urlparse.urlparse(item.url)[2]              # URL path component
        ext=funcs.get_url_ext(item.url)                     # file extension

        item,file_path=self.update_file_path(item,root_dir,sub_dir)

        # Only real HTML pages get their link tree rewritten; images,
        # css and js files are stored verbatim.
        if (ext not in settings.S_img_ext) and (ext not in ('css','js')):
            item=self.modify_tree(item)
        if self.record(item):
            if self.check_dir_path(os.path.dirname(file_path)):
                if self.save_data(file_path,item):
                    print('pipeline: '+str(self.thread_id)+' : '+str(item.idx)+' : '+item.url)
        else:
            print('pipeline: '+str(self.thread_id)+' : no need to update '+str(item.idx)+' : '+item.url)

    def update_file_path(self,item,root_dir,sub_dir):
        '''Compute the on-disk path for item and set item.idx (md5-based id).

        css/js keep their original names under a shared css_js folder;
        images keep their names under their own domain folder; html pages
        are renamed to <id>.html.
        '''
        item.idx=funcs.get_md52int(item.url)                # numeric file id
        domain_name=funcs.url2domain(item.url)
        ext=funcs.get_url_ext(item.url)                     # file extension
        if ext in ('css','js'):
            # css/js are not renamed; they live together under css_js/
            file_path=root_dir+self.url_prefix_delta+'/css_js'+sub_dir
        elif ext in settings.S_img_ext:
            file_path=root_dir+self.url_prefix_delta+'/'+domain_name+sub_dir
        else:
            # rename the html file to its numeric id
            sub_dir=funcs.url2path(item.url)
            dir_path=os.path.dirname(root_dir+self.url_prefix_delta+sub_dir)
            file_path=dir_path+'/'+str(item.idx)+'.html'
        return item,file_path

    def check_dir_path(self,dir_path):
        '''Ensure dir_path exists, creating it if needed; True on success.'''
        if os.path.isdir(dir_path):
            return True
        try:
            os.makedirs(dir_path)
            return True
        except OSError:
            # Another worker thread may have created the directory between
            # our check and makedirs; treat "already exists" as success.
            return os.path.isdir(dir_path)

    def modify_tree(self,item):
        '''Rewrite <a>/<link>/<script> references in item.soup to mirror URLs.'''
        item.soup=self.both_need_clear(item.soup)
        # site-specific cleanup of unwanted elements
        if settings.S_target_website=='csdn':
            item.soup=self.csdn_clear(item.soup)
        if settings.S_target_website=='iteye':
            item.soup=self.iteye_clear(item.soup)
        # rewrite <a href> links
        for a in item.soup.find_all('a',href=True):
            href=a.get('href','')
            if '#' in href:
                url_parts=href.split('#')          # strip the anchor
                turl=url_parts[0].strip()
                anchor=url_parts[1].strip() if len(url_parts)>=2 else ''
            else:
                turl=href
                anchor=''
            if turl=='' or turl.startswith('javascript') or turl.startswith('#'):
                continue
            if funcs.valid_url(item.url,turl):
                c_href=urlparse.urljoin(item.url,turl)
                if len(re.findall('/',c_href))==2:
                    # bare domain: http://www.csdn.net -> http://www.csdn.net/
                    c_href=c_href+'/'
                sub_dir=funcs.url2path(c_href)
                c_idx=funcs.get_md52int(c_href)
                dir_path=os.path.dirname(self.url_prefix_main+sub_dir)
                c_href=dir_path+'/'+str(c_idx)+'.html'
                if anchor:
                    a['href']=c_href+'#'+anchor
                else:
                    a['href']=c_href
        # rewrite css links
        for css in item.soup.find_all('link',href=True):
            href=css.get('href','')
            if funcs.valid_url(item.url,href):
                css['href']=self._css_js_mirror_url(item.url,href)
        # rewrite js links.
        # BUG FIX: this used to read/write the nonexistent 'script' attribute
        # instead of 'src', so script tags were never rewritten.
        # NOTE(review): both_need_clear() above decomposes all <script> tags,
        # so this loop currently finds nothing — confirm whether scripts
        # should survive cleanup before relying on it.
        for js in item.soup.find_all('script',src=True):
            src=js.get('src','')
            if funcs.valid_url(item.url,src):
                js['src']=self._css_js_mirror_url(item.url,src)
        return item

    def _css_js_mirror_url(self,base_url,href):
        '''Map a css/js URL (absolute or relative) to its css_js mirror URL.'''
        c_href=urlparse.urljoin(base_url,href)
        sub_url=urlparse.urlparse(c_href)[2]
        # collapse a leading /../ that urljoin can leave behind
        # (http://www.csdn.net/../sdf -> /sdf)
        if sub_url.startswith('/../'):
            sub_url=sub_url[3:]
        return self.url_prefix_main+'/css_js'+sub_url

    def _select_targets(self,soup,field,_class,_id):
        '''Return the elements matching field, optionally filtered by a
        class or id attribute (class wins if both are given).'''
        if _class:
            return soup.select('%s[class="%s"]'%(field,_class))
        elif _id:
            return soup.select('%s[id="%s"]'%(field,_id))
        return soup.find_all(field)

    def decompose_element(self,soup,field,_class='',_id=''):
        '''Remove matching elements (and their contents) from soup.'''
        for element in self._select_targets(soup,field,_class,_id):
            element.decompose()
        return soup

    def clear_element(self,soup,field,_class='',_id=''):
        '''Empty matching elements in soup (the tags themselves remain).'''
        for element in self._select_targets(soup,field,_class,_id):
            element.clear()
        return soup

    def both_need_clear(self,soup):
        '''Cleanup applied to every target site: drop scripts and <base>.'''
        soup=self.decompose_element(soup,'script')
        soup=self.decompose_element(soup,'base')
        return soup

    def csdn_clear(self,soup):
        '''csdn-specific cleanup.'''
        soup=self.clear_element(soup,'div',_class='nav')                # top nav
        soup=self.decompose_element(soup,'script',_id='frm_tt1')        # top
        soup=self.clear_element(soup,'table',_class='comt')             # bottom comment box
        return soup

    def iteye_clear(self,soup):
        '''iteye-specific cleanup (nothing to remove yet).'''
        return soup

    def record(self,item):
        '''Insert or update the page's DB row.

        Returns True when the page content should be (re)saved to disk,
        False when the stored copy is still fresh enough.
        '''
        time_format='%Y-%m-%d %H:%M:%S'
        if not settings.S_is_update:
            # first crawl: insert a fresh row with revisit frequency 1
            update_time=datetime.datetime.now().strftime(time_format)
            sql_prefix='insert into %s '%settings.S_target_website
            sql=sql_prefix+'(id,url,update_time,file_length,update_fre) values(%s,%s,%s,%s,%s)'
            self.cur.execute(sql,(str(item.idx),item.url,update_time,str(item.file_length),str(1)))
            return_value=True
        else:
            sql_prefix='select file_length,update_fre from %s '%settings.S_target_website
            sql=sql_prefix+'where id=%s'
            # DB-API parameters must be a sequence, not a bare string
            self.cur.execute(sql,(str(item.idx),))
            old_file_length,old_update_fre=self.cur.fetchone()
            if abs(old_file_length-item.file_length)>500:
                # content changed enough: reset frequency and download time
                update_time=datetime.datetime.now().strftime(time_format)
                sql_prefix='update %s '%settings.S_target_website
                sql=sql_prefix+'SET update_time=%s,update_fre =%s WHERE id =%s'
                self.cur.execute(sql,(update_time,str(1),str(item.idx)))
                return_value=True
            else:
                # unchanged: back off the revisit frequency (with jitter)
                update_fre=old_update_fre*2+random.randint(1,20)
                sql_prefix='update %s '%settings.S_target_website
                sql=sql_prefix+'SET update_fre =%s WHERE id =%s'
                self.cur.execute(sql,(str(update_fre),str(item.idx)))
                return_value=False

        self.cxn.commit()
        return return_value

    def save_data(self,path,item):
        '''Write the item to path; return True on success, False on failure.

        Images are written as raw bytes, css/js as text, html pages as the
        prettified soup (always utf-8). Failures are appended to
        save_fail.txt next to the working directory.
        '''
        try:
            ext=funcs.get_url_ext(item.url)
            if ext in settings.S_img_ext:
                w_type='wb'
                data=item.content
            elif ext in ('css','js'):
                w_type='w'
                data=item.content
            else:
                w_type='w'
                # prettify() emits utf-8 regardless of item.coding
                data=item.soup.prettify()
            if len(data.strip())==0:
                raise Exception(item.url+' content is empty')
            f=open(path,w_type)
            try:
                f.write(data)
            finally:
                f.close()       # don't leak the handle if write() fails
            return True
        except Exception as e:
            msg='pipeline: '+item.url+' save fail :'+str(e)
            print(msg)
            save_fail_log=os.path.abspath('.').replace('\\','/')+'/save_fail.txt'
            f=open(save_fail_log,'a')
            try:
                f.write(msg+'\n')
            finally:
                f.close()
            return False
        
if __name__=='__main__':
    # quick manual smoke check of extension detection
    sample_ext=funcs.get_url_ext('http://asdf.com/sdf.css')
    if sample_ext in ('css','js'):
        print(sample_ext)

















        
