#! /usr/bin/env python
# -*- coding: utf-8 -*-

##	  Copyright 2012, Geoffrey GROFF and Dimitri SEGARD
##	  This file is part of otaku-loader.
##
##    otaku-loader is free software: you can redistribute it and/or modify
##    it under the terms of the GNU General Public License as published by
##    the Free Software Foundation, either version 3 of the License, or
##    (at your option) any later version.
##
##    otaku-loader is distributed in the hope that it will be useful,
##    but WITHOUT ANY WARRANTY; without even the implied warranty of
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
##    GNU General Public License for more details.
##
##    You should have received a copy of the GNU General Public License
##    along with otaku-loader.  If not, see <http://www.gnu.org/licenses/>.

import http.client
import urllib
import urllib.request
import urllib.error
import re
import os
import datetime
import sys
import time
from PyQt4.QtCore import *


import globales
import utils

# --- Pre-compiled patterns for scraping otaku-attitude.net HTML pages. ---
# Earlier revisions of the info pattern, kept for reference:
#p_html_info_old = re.compile(r'<h1.*?>(.*?)<a*?<div align="justify".*<img.*?src="(.*)" ?/>(.*)<div class="hr_ddl".*<div style="text-align: left;">.*<li>Genres? : (.*)</li>.*<li>Nombre d\'épisodes : (.*)</li>.*<li>Nombre d\'épisodes en ligne : (.*)</li>.*<li>Licencié \? : (.*)</li>.*<li>Subbé par : <a target="_blank" href="(.*)">(.*)</a>.*</li>.*<li>Emplacement : (.*)</li>.*</div>.*<iframe', re.DOTALL)

#p_html_info = re.compile(r'<h1.*?>(.*?)<a.*?<div style="margin: 10px 0;">(.*?)</div>.*?<div class="bloc_contextuel">.*?img src="(.*?)".*?</strong>.*?>(.*?)</a>.*?</strong>(.*?)</li>.*?</strong>(.*?)</li>.*?</strong>(.*?)/a> <a href="(.*?)".*?</strong>(.*?)</li>',re.DOTALL)

#p_html_info = re.compile(r'<h1.*?>(.*?)<a.*?<div style="margin: 10px 0;">(.*?)</div>.*?<div class="bloc_contextuel">(.*?)</ul>',re.DOTALL)
# Fiche page: captures (1) title, (2) description <div>, (3) the raw
# "bloc_contextuel" fragment that p_html_contextuel parses further.
p_html_info = re.compile(r'<h1.*?>(.*?)<a.*?<div style="margin: 10px 0;".*?>(.*?)</div>.*?<div class="bloc_contextuel">(.*?)</ul>',re.DOTALL)

# One whole <tr class="download..."> ... </tr> row per downloadable item.
p_html_links_bloc = re.compile(r'<tr class="download.*?</tr>',re.DOTALL)
# Episode row cells, in order: id, episode number, encoding, resolution,
# duration (min:sec), size (MB), upload date, download count.
p_html_bloc_donnee = re.compile(r'id="(.*?)".*?strong>(.*?)<.*?cell">(.*?)<.*?cell">(.*?)<.*?cell">(.*?)<.*?cell">(.*?)<.*?cell">(.*?)<.*?cell">(.*?) fois',re.DOTALL)
# Bonus/OAV row cells, in order: id, name, size, upload date, download count.
p_html_bloc_oav = re.compile(r'id="(.*?)".*?cell">(.*?)<.*?cell">(.*?)<.*?cell">(.*?)<.*?cell">(.*?) fois',re.DOTALL)
# A row containing 'bonus' is an OAV entry rather than a numbered episode.
p_test_oav = re.compile(r'bonus')
#p_html_contextuel = re.compile('.*?img src="(.*?)".*?</strong>.*?>(.*?)</a>.*?</strong>(.*?)</li>.*?</strong>(.*?)</li>.*?</strong>(.*?)/a> <a href="(.*?)".*?</strong>(.*?)</li>',re.DOTALL)

# Contextual block: captures (1) cover-image URL, (2) the genres fragment.
p_html_contextuel = re.compile('.*?img src="(.*?)".*?</strong>(.*?)</li>',re.DOTALL)

# Download-row ids; even and odd table rows carry different CSS classes.
p_find_link = re.compile('<tr class="download" id="(.*?)" >')
p_find_link_impair = re.compile('<tr class="download cell_impaire" id="(.*?)" >')

class Dl_class (QObject):
    """Scraper/downloader for otaku-attitude.net anime pages ("fiches").

    Downloads page metadata to ``fiches/<name>.xml``, cover images to
    ``img/`` and episode files to ``dl/``.  Progress is reported to the
    GUI through PyQt4 old-style signals (``self.emit(SIGNAL(...))``).
    """

    # Host serving both the fiche pages and the download redirects.
    HOST = 'www.otaku-attitude.net'

    def __init__(self):
        QObject.__init__(self)

    def getDdlLink(self, idLien, idFiche):
        """Return the direct-download URL for one link of one fiche.

        idLien  -- numeric id of the download row
        idFiche -- numeric id of the fiche page

        The site answers the HEAD request with a redirect whose
        ``Location`` header is the actual file URL.
        """
        conn = http.client.HTTPConnection(self.HOST)
        try:
            conn.request("HEAD", ''.join(["/launch-download-anime-", str(idFiche),
                                          "-ddl-", str(idLien), ".html"]))
            rep = conn.getresponse()
            rep.read()  # drain the body so the connection closes cleanly
            return rep.getheader('Location')
        finally:
            conn.close()

    def getDdlLinks(self, idFiche):
        """Return every direct-download URL found on a fiche page.

        idFiche -- fiche page name (e.g. "fiche-anime-446-hack-quantum.html",
                   as the commented-out sample calls show) or a bare numeric
                   id; the numeric id is extracted from the page name.

        Fixes vs. the original: the GET path used the undefined name
        'fiche' (NameError), the timer 't0' was never started, and the
        collected URLs were never returned.
        """
        fiche = str(idFiche)
        # The page name embeds the numeric id: fiche-anime-<id>-<slug>.html
        found = re.findall(r'fiche-anime-(.*?)-.*?\.html', fiche)
        numId = found[0] if found else fiche

        t0 = time.time()
        urls = []
        conn = http.client.HTTPConnection(self.HOST)
        try:
            conn.request("GET", "/" + fiche.lstrip('/'))
            rep = conn.getresponse()
            html = rep.read().decode('utf-8')

            # Both even and odd table rows carry download ids.
            ids = re.findall(p_find_link, html) + re.findall(p_find_link_impair, html)

            for idLien in ids:
                conn.request("HEAD", ''.join(["/launch-download-anime-", numId,
                                              "-ddl-", str(idLien), ".html"]))
                rep = conn.getresponse()
                rep.read()  # must drain before reusing the connection
                urls.append(rep.getheader('Location'))
        finally:
            conn.close()

        print("resultat en " + str(time.time() - t0))
        return urls

    def getFicheInfos(self, fiche):
        """Scrape one fiche page and write its metadata to fiches/<name>.xml.

        Also downloads the cover image into img/ (best effort).
        Returns False when the XML file already exists, True once it has
        been written.

        fiche -- page name, e.g. "fiche-anime-375-tayutama-....html"
        """
        filename = fiche[0:fiche.rfind('.')] + '.xml'
        idFiche = re.findall('fiche-anime-(.*?)-.*?.html', fiche)[0]
        dirname = 'fiches'
        dirimg = 'img'

        if os.path.isfile(dirname + '/' + filename):
            # Already scraped: nothing to do.
            return False

        conn = http.client.HTTPConnection(self.HOST)
        conn.request("GET", "/" + fiche)
        rep = conn.getresponse()
        html = rep.read().decode('utf8')
        conn.close()

        # NOTE(review): re.search may return None on an unexpected page
        # layout; the original crashed with AttributeError in that case
        # and this keeps the same behaviour.
        results = re.search(p_html_info, html)
        bloc = re.findall(p_html_links_bloc, html)

        titre = utils.full_clean(results.group(1))
        description = utils.full_clean(results.group(2))
        contexte = re.search(p_html_contextuel, results.group(3))

        img_url = contexte.group(1)
        img_name = img_url[img_url.rfind('/') + 1:]
        genres = re.findall('<a href.*?>(.*?)</a>', contexte.group(2))

        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        if not os.path.isdir(dirimg):
            os.makedirs(dirimg)

        self._downloadImage(img_url, ''.join([dirimg, '/', img_name]))

        monXML = self._buildXml(idFiche, titre, img_url, description, bloc, genres)
        with open(dirname + "/" + filename, "w", encoding='utf-8') as fichier:
            fichier.writelines(monXML)

        return True

    def _downloadImage(self, img_url, img_file):
        """Best-effort download of the cover image; errors are only logged."""
        try:
            rep = urllib.request.urlopen(img_url)
            if not os.path.isfile(img_file):
                with open(img_file, 'wb') as fichier:
                    block_sz = 8192
                    while True:
                        buffer = rep.read(block_sz)
                        if not buffer:
                            break
                        fichier.write(buffer)
        except (urllib.error.HTTPError, urllib.error.URLError) as error:
            print('[-] Data not retrieved because ' + str(error) + '\nURL:' + img_url)

    def _buildXml(self, idFiche, titre, img_url, description, bloc, genres):
        """Assemble the fiche XML document as a list of text chunks.

        Output is byte-identical to the original inline builder: episodes
        first, then the <bonus> (OAV) section, then the genres.
        """
        monXML = []
        oav = []
        oav.append("\t<bonus>\n")
        monXML.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        monXML.append("<fiche>\n")
        monXML.append(''.join(["\t<id>", idFiche, "</id>\n"]))
        monXML.append(''.join(["\t<titre><![CDATA[", titre, "]]></titre>\n"]))
        monXML.append(''.join(["\t<image>", img_url, "</image>\n"]))
        monXML.append(''.join(["\t<description><![CDATA[", description, "]]></description>\n"]))
        monXML.append("\t<episodes>\n")
        for tr in bloc:
            # A data row contains, in order: id, episode number, encoding,
            # resolution, duration (min:sec), size (MB), upload date and
            # download count.  Rows flagged 'bonus' are OAV entries.
            if re.search(p_test_oav, tr):
                donnees = re.search(p_html_bloc_oav, tr)
                oav.append("\t\t<oav>\n")
                oav.append(''.join(["\t\t\t<id>", donnees.group(1), "</id>\n\t\t\t<nom>", donnees.group(2), "</nom>\n\t\t\t<taille>", donnees.group(3), "</taille>\n\t\t\t<upload>", donnees.group(4), "</upload>\n\t\t\t<nombre_de_dl>", donnees.group(5), "</nombre_de_dl>\n"]))
                oav.append("\t\t</oav>\n")
            else:
                donnees = re.search(p_html_bloc_donnee, tr)
                monXML.append("\t\t<episode>\n")
                monXML.append(''.join(["\t\t\t<id>", donnees.group(1), "</id>\n\t\t\t<numero>", donnees.group(2), "</numero>\n\t\t\t<encodage>", donnees.group(3), "</encodage>\n\t\t\t<resolution>", donnees.group(4), "</resolution>\n\t\t\t<duree>", donnees.group(5), "</duree>\n\t\t\t<taille>", donnees.group(6), "</taille>\n\t\t\t<upload>", donnees.group(7), "</upload>\n\t\t\t<nombre_de_dl>", donnees.group(8), "</nombre_de_dl>\n"]))
                monXML.append("\t\t</episode>\n")
        monXML.append("\t</episodes>\n")
        oav.append("\t</bonus>\n")
        monXML = monXML + oav
        monXML.append("\t<genres>\n")
        for genre in genres:
            monXML.append(''.join(["\t\t<genre><![CDATA[", genre.strip(), "]]></genre>\n"]))
        monXML.append("\t</genres>\n")
        monXML.append("</fiche>")
        return monXML

    def prepareAffichage(self):
        """Ask the GUI to prepare the display area."""
        self.emit(SIGNAL("Prepare()"))

    def defilList(self):
        """Ask the GUI to scroll its list."""
        self.emit(SIGNAL("Defilement()"))

    def dlFile(self, url):
        """Download *url* into dl/, emitting progress signals.

        The file is written as ``<name>.part`` and renamed when the
        transfer finishes without being interrupted (``globales.kill``).
        Returns False when the target file already exists, True otherwise.
        """
        dirname = 'dl'
        if not os.path.isdir(dirname):
            os.makedirs(dirname)

        rep = urllib.request.urlopen(url)
        if not rep:
            return False

        meta = rep.info()
        file_name = urllib.parse.unquote(url[url.rfind('/') + 1:])
        file_size = int(meta.get("Content-Length"))
        self.emit(SIGNAL("prepareAffichage(QString, float)"), file_name, file_size)

        if os.path.isfile(dirname + '/' + file_name):
            # File already downloaded: abort.
            return False

        part_path = dirname + '/' + file_name + '.part'
        with open(part_path, 'wb') as f:
            file_size_dl = 0
            block_sz = 8192
            # time.clock() (deprecated, removed in Python 3.8) measured CPU
            # time on Unix, which is why the original needed a bogus
            # platform-dependent "/10" speed kludge.  Wall-clock time gives
            # a correct transfer speed on every platform.
            debut = time.time()
            avancement = 0

            while True:
                buffer = rep.read(block_sz)
                if not buffer:
                    break
                if globales.kill:
                    break

                file_size_dl += len(buffer)
                f.write(buffer)
                duree = time.time() - debut
                if duree == 0:
                    # Guard against division by zero on the very first chunk.
                    duree = 1

                pourcentage = file_size_dl * 100. / file_size
                if int(pourcentage) > avancement:
                    avancement = int(pourcentage)
                    self.emit(SIGNAL("pourcentage(int)"), avancement)

                speed = round((file_size_dl / duree) / 1024, 2)
                status = r"%.1f Mo [%.2f%%] %.2f Ko/s" % (
                    round((file_size_dl / 1024) / 1024, 1),
                    round(pourcentage, 2), speed)
                self.emit(SIGNAL("Statuts(QString)"), status)

        if not globales.kill:
            # Transfer completed: promote the .part file to its final name.
            os.rename(part_path, dirname + '/' + file_name)

        return True

#dlFile('http://download.otaku-attitude.net/anime/34b75cc84ceec44f93789aeecfee1ca6/5003fef5/hack-quantum/%5BVocaMiku%5D_Hack_Quantum_03_%5BFIN%5D_%5BFULL_HD%5D.mkv')
#getFicheInfos("fiche-anime-375-tayutama-kiss-on-my-deity-.html")
#getFicheInfos("/fiche-anime-375-tayutama-kiss-on-my-deity-.html")
#getFicheInfos("/fiche-anime-375-tayutama-kiss-on-my-deity-.html")
#getFicheInfos("/fiche-anime-375-tayutama-kiss-on-my-deity-.html")
#getFicheInfos("fiche-anime-172-amagami-ss.html")
#getDdlLinks("fiche-anime-446-hack-quantum.html")
#getDdlLinks("fiche-anime-430-b-gata-h-kei.html")
