# -*- encoding:utf-8 -*-

import requests
from page.views import rout
# from django.http.request import HttpRequest
from django.conf import settings
import os
import requests
from urlparse import urljoin
from bs4 import BeautifulSoup
import re
from models import Studio
from common.db_tools import get_or_none
import zipfile
import shutil


def zip_studio(studio):
    """Freeze *studio* into a static directory and zip it.

    Runs a full Freeze pass, notifies the studio's freeze record that
    the zip is done, and returns the root directory that was frozen.
    """
    freezer = Freeze(studio)
    freezer.zip_studio()
    studio.freeze.done_zip()
    return freezer.root

def zip_app(app):
    """Freeze a single app page. Not implemented yet (placeholder)."""
    # TODO: implement per-app freezing (see Freeze.zip_app).
    pass

def zip_dir(path):
    """Zip an arbitrary directory. Not implemented yet (placeholder)."""
    # TODO: implement generic directory zipping.
    pass



class Freeze(object):
    """
    Snapshot a studio's live pages into a self-contained static tree
    under MEDIA_ROOT/html/<studio name>/ and pack it into a zip.

    zip_studio(): re-freeze the whole studio (pages, css, js, images,
    favicon) and build the archive. After a full run the caller is
    expected to invoke the studio's freeze-done callback (see the
    module-level zip_studio() helper).
    """

    def __init__(self, studio):
        """
        @studio: studio model object; only .name and .favicon are read.
        """
        self.studio = studio
        # Live pages are served under http://<APG_DOMAIN>/page/<studio>/.
        self.base_url = urljoin(
            'http://' + settings.APG_DOMAIN,
            'page/{name}/'.format(name=self.studio.name))
        self.html_dir = os.path.join(settings.MEDIA_ROOT, 'html')
        self.root = os.path.join(self.html_dir, self.studio.name)

        # Start from a clean slate so stale assets never leak into the zip.
        shutil.rmtree(self.root, ignore_errors=True)
        self.img_path = os.path.join(self.root, 'image')
        self.js_path = os.path.join(self.root, 'js')
        self.css_path = os.path.join(self.root, 'css')

        for path in (self.root, self.img_path, self.js_path, self.css_path):
            self.mkdir(path)

        # Dedup bookkeeping: page names / asset filenames already fetched.
        self.scrapyed_link = []
        self.scrapyed_img = []
        self.scrapyed_js = []
        self.scrapyed_css = []
        # Maps hashed image URL -> saved filename, so a repeated <img>
        # can be rewritten without re-downloading (the image filename
        # suffix depends on the response Content-Type, so it cannot be
        # recomputed from the URL alone -- see save_image()).
        self._img_filenames = {}

    def mkdir(self, path):
        """Create *path* (with parents); tolerate 'already exists'."""
        try:
            os.makedirs(path)
        except OSError as e:
            # Directory may survive a partial previous run; log and go on.
            print(e)

    def zipfile(self):
        """Write everything under self.root into <html_dir>/<studio>.zip,
        storing paths relative to the studio directory."""
        base = os.path.join(self.html_dir, self.studio.name)
        with zipfile.ZipFile(base + '.zip', 'w') as archive:
            for dirpath, _dirnames, filenames in os.walk(self.root):
                for fname in filenames:
                    abspath = os.path.join(dirpath, fname)
                    archive.write(abspath, os.path.relpath(abspath, base))

    def zip_studio(self):
        """Freeze the four top-level pages and the favicon, then every
        app page discovered on the collection page, then build the zip."""
        self.save_link('home')
        app_names = self.save_link('collection')
        self.save_link('contact')
        self.save_link('policy')
        self.save_favicon()
        for app_name in app_names:
            self.save_link(app_name)
        self.zipfile()

    def zip_app(self, app):
        """Freeze a single app page in isolation. Not implemented yet."""
        pass

    def save_favicon(self):
        """Download the studio favicon (if configured) as favicon.ico."""
        url = self.studio.favicon
        if url:
            url = urljoin(self.base_url, url)
            rt = requests.get(url)
            with open(os.path.join(self.root, 'favicon.ico'), 'wb') as f:
                f.write(rt.content)
        else:
            print('----not valid favicon url')

    def save_link(self, name):
        """
        Fetch page *name* (link pattern: /page/<studio>/<name>), localize
        its css/js/images, rewrite internal anchors, and save the result
        as <name>.html under self.root.

        Returns the list of sub-page names discovered in the page's
        anchors (empty if the page was already frozen).
        """
        if name in self.scrapyed_link:
            return []
        print('freezing ' + name)
        self.scrapyed_link.append(name)
        rsp = requests.get(urljoin(self.base_url, name))
        # Explicit parser: silences the bs4 warning and keeps output
        # identical regardless of which parsers are installed.
        soup = BeautifulSoup(rsp.text, 'html.parser')

        self.save_resource(soup)
        sub_entry = self.replace_link(soup)

        with open(os.path.join(self.root, name + '.html'), 'wb') as f:
            # encode() yields bytes, matching the 'wb' mode on py2 and py3.
            f.write(soup.encode('utf-8'))
        return sub_entry

    def save_resource(self, soup):
        """Localize every external asset referenced by *soup*."""
        self.save_css(soup)
        self.save_js(soup)
        self.save_image(soup)

    def save_css(self, soup):
        """Download each <link href> once and point it at ./css/."""
        for link in soup.select('link'):
            href = link.get('href')
            if not href:
                # <link> without href (e.g. malformed markup): skip it
                # instead of raising KeyError.
                continue
            url = urljoin(self.base_url, href)
            filename = str(hash(url)) + '.css'
            if filename not in self.scrapyed_css:
                rt = requests.get(url)
                self.scrapyed_css.append(filename)
                with open(os.path.join(self.css_path, filename), 'wb') as f:
                    f.write(rt.content)
            link['href'] = './css/{name}'.format(name=filename)

    def save_js(self, soup):
        """Download each external <script src> once, point it at ./js/."""
        for script in soup.select('script'):
            src = script.get('src')
            if not src:
                # Inline script: nothing to fetch.
                continue
            url = urljoin(self.base_url, src)
            filename = str(hash(url)) + '.js'
            if filename not in self.scrapyed_js:
                rt = requests.get(url)
                self.scrapyed_js.append(filename)
                with open(os.path.join(self.js_path, filename), 'wb') as f:
                    f.write(rt.content)
            script['src'] = './js/{name}'.format(name=filename)

    def save_image(self, soup):
        """Download each <img src> once and point it at ./image/.

        The saved filename's suffix comes from the response Content-Type,
        so already-downloaded URLs are looked up in _img_filenames.
        (The original membership-tested the bare hash against a list of
        suffixed filenames, which never matched and re-downloaded every
        image -- and would have left `filename` stale had it matched.)
        """
        for img in soup.select('img'):
            src = img.get('src')
            if not src:
                continue
            url = urljoin(self.base_url, src)
            hash_url = str(hash(url))
            filename = self._img_filenames.get(hash_url)
            if filename is None:
                rt = requests.get(url)
                content_type = rt.headers.get('Content-Type')
                sufix = content_type.split('/')[-1] if content_type else ''
                filename = hash_url + '.' + sufix
                self._img_filenames[hash_url] = filename
                self.scrapyed_img.append(filename)
                with open(os.path.join(self.img_path, filename), 'wb') as f:
                    f.write(rt.content)
            img['src'] = './image/{name}'.format(name=filename)

    def replace_link(self, soup):
        """Rewrite internal anchors (/page/<studio>/<name>) to relative
        <name>.html links and return the discovered page names."""
        sub_entry = []
        for a in soup.select('a'):
            href = a.get('href')
            if not href:
                # Anchor without href would make re.search raise TypeError.
                continue
            mt = re.search(r'/page/(.+)/([^/]+)', href)
            if mt:
                a['href'] = mt.group(2) + '.html'
                sub_entry.append(mt.group(2))
        return sub_entry

    
    
    

    
    