# -*- coding: utf-8 -*-
"""
Created on Sun Apr  2 19:09:54 2017

@author: BaiYunfei
"""
import re
import urllib
import urllib.request
import zlib

from gzip import GzipFile
from io import BytesIO

class Spyder():
    """Simple page crawler.

    Reads a list of URLs (one per line) from a text file and saves each
    page's decoded HTML to numbered files under ``storage_location``.
    Failures (network errors, non-200 responses, undecodable bodies) are
    recorded in ``error_queue`` and the offending page is skipped.
    """

    # Default input/output paths; overridden by the constructor arguments.
    urls_file = "E:/Repository/tipdm/raw_data/bbs_urls.txt"
    storage_location = "E:/Repository/tipdm/output"

    # Browser-like request headers.  NOTE: gzip is not advertised in an
    # Accept-Encoding header, but some servers compress anyway, so
    # get_page still handles Content-Encoding.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36",
        "Accept-Language": "zh-CN,zh;q=0.8",
        "Connection": "keep-alive"
    }

    def get_page(self, url):
        """Fetch *url* and return its body decoded to ``str``.

        Returns -1 on any failure (empty URL, network error, non-200
        status, or a body that cannot be decoded); every failure except
        the empty URL is also appended to ``self.error_queue``.
        """
        if not url:
            return -1
        req = urllib.request.Request(url, headers=self.headers)
        try:
            resp = urllib.request.urlopen(req)
        except Exception:
            # Network/DNS/HTTP errors: record and skip this page.
            print("Not Found: ", url)
            self.error_queue.append(("Not Found", url))
            return -1
        if resp.code != 200:
            print(resp.code, ": ", url)
            self.error_queue.append((resp.code, url))
            return -1

        print("Reading: ", url)
        page = resp.read()

        # Decompress when the server compressed the body.  (The original
        # code decompressed into a temporary variable and then decoded the
        # still-compressed bytes; here the decompressed bytes replace
        # `page` so the decode below operates on the real content.)
        content_encoding = resp.getheader('Content-Encoding')
        if content_encoding == 'gzip':
            page = GzipFile(fileobj=BytesIO(page)).read()
        elif content_encoding == 'deflate':
            page = zlib.decompress(page)

        # Pick the charset from the Content-Type header when present
        # (the header may be missing entirely); default to utf-8.
        encode = 'utf-8'
        content_type = resp.getheader('Content-Type')
        if content_type:
            result = re.search(r'charset=[\w\-]*', content_type)
            if result is not None:
                encode = result.group().replace('charset=', '')
        try:
            return page.decode(encode)
        except (UnicodeDecodeError, LookupError):
            # Many pages mislabel their charset; gbk is the usual
            # fallback for Chinese BBS content.
            try:
                return page.decode('gbk')
            except UnicodeDecodeError:
                print("Decode error: ", url)
                self.error_queue.append(("Decode error", url))
                return -1

    def __load_urls(self):
        """Read URLs (one per line) from ``self.urls_file`` into ``self.url_queue``."""
        with open(self.urls_file, 'rt') as f:
            for line in f:
                url = line.strip()
                if url:  # skip blank/whitespace-only lines
                    self.url_queue.append(url)

    def save_to_file(self, content, file_name):
        """Write *content* (a str) UTF-8-encoded to ``storage_location/file_name``.

        Returns -1 without writing when *content* is the error sentinel -1.
        """
        if content == -1:
            return -1
        with open(self.storage_location + '/' + file_name, 'wb') as f:
            f.write(content.encode('utf-8'))

    def save_all_pages(self):
        """Fetch every queued URL and save it to a file named by its 1-based index."""
        for count, url in enumerate(self.url_queue, start=1):
            self.save_to_file(self.get_page(url), str(count))

    def __init__(self, urls_file, storage_location="output"):
        """Load URLs from *urls_file*; pages will be saved under *storage_location*."""
        self.urls_file = urls_file
        self.storage_location = storage_location
        # Per-instance queues: the original used shared class-level lists,
        # which leaked state between instances.
        self.url_queue = []
        self.error_queue = []
        self.__load_urls()

if __name__ == '__main__':
    # Crawl every URL listed in the input file and dump each fetched
    # page into the output directory, one numbered file per URL.
    input_urls = "E:/Repository/tipdm/raw_data/bbs_urls.txt"
    output_dir = "E:/Repository/tipdm/output"
    crawler = Spyder(input_urls, output_dir)
    crawler.save_all_pages()