#!/usr/bin/python
# -*- coding: utf-8 -*-
# Description: A PE terminator.
# Create at 2008-7-12
# Author: MK2[fengmk2@gmail.com]
"""A PE terminator."""

import os
import time
import md5
import socket
from urllib2 import Request, urlopen, URLError, HTTPError, FTPHandler
from urlparse import urlparse
import ftplib
import struct
from const import FileType
import fileutil

# Executable-header magic numbers (little-endian), as defined in winnt.h.
IMAGE_DOS_SIGNATURE             = 0x5A4D      # 'MZ' - DOS header
IMAGE_OS2_SIGNATURE             = 0x454E      # 'NE' - 16-bit OS/2 / Win16
IMAGE_OS2_SIGNATURE_LE          = 0x454C      # 'LE' - OS/2 little-endian
IMAGE_VXD_SIGNATURE             = 0x454C      # 'LE' - Windows VxD driver
IMAGE_NT_SIGNATURE              = 0x00004550  # 'PE\0\0' - Win32 PE

# timeout in seconds, applied process-wide to all new sockets
TIMEOUT = 15
socket.setdefaulttimeout(TIMEOUT)

def get_peurl_cursor():
    """Connect to the 'peurl' MySQL database and return a live cursor.

    Retries forever: sleeps 10 seconds after each failed attempt until a
    connection succeeds, so this call can block indefinitely.

    NOTE(review): MySQLdb is never imported in this module; unless it is
    injected elsewhere, the NameError raised here is swallowed by the
    broad ``except Exception`` below and this loops forever -- confirm.
    """
    cursor = None
    while cursor is None:
        try:
            connection = MySQLdb.connect('192.168.48.249', 'root', 'sonic', 'peurl', charset='utf8')
            cursor = connection.cursor()
        except Exception, e:
            # Best-effort retry: report the error and try again in 10s.
            print e, 'sleep 10 secs,'
            cursor = None
            time.sleep(10)
    return cursor

class Terminator:
    user_agent_ie5 = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)' # IE5
    user_agent_ie6 = 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)' #IE6
    user_agent_ff3 = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9) Gecko/2008052906 Firefox/3.0'
#    a_size = 4096
#    b_size = 2048
#    c_size = 4096
    
    def __call__(cls, *args):
        if not hasattr(cls, 'peurl_cursor'):
            cls.peurl_cursor = get_peurl_cursor()
        return cls.peurl_cursor #single ton cursor()

    def __init__(self, url):
        self.url = url.lower()
        self.urlinfo = urlparse(self.url)
#        print self.urlinfo
        self.isftp = False
        if self.urlinfo.scheme == 'ftp':
            self.isftp = True
            self._handler = self.ftp_handler
        elif self.urlinfo.scheme != 'http':
            raise URLError('We just only terminate http and ftp...')
        else:
            self._handler = self.http_handler
            headers = {'User-Agent': self.user_agent_ff3,
#                       'Host': 'www.xdowns.com',
#                       'Referer': 'http://www.xdowns.com/soft/',
#                       'Cookie': 'ystat_cn_ss_6117=26_17099035_3870082595; yitian=yes; ASPSESSIONIDAQARQQRA=KCPODAMCCLHBCIILFEKLAGLI'
#                   'Keep-Alive': '300',
#                   'Connection': 'keep-alive'
                   }
            self.req = Request(self.url, headers=headers)
        self.size = None # if size is None ,meaning the url still waiting to be terminated...
    
    def _split(self, size):
        """给定文件大小，返回对文件的切片B的开始位置（中间切片）
        size 必须大于10240
        """
        if size < 10240:
            raise ValueError('size must be greater than 10 KB')
        return size / 2 - 1024
    
    @staticmethod
    def is_pe(datas):
        """判断文件的前4KB字节判断是否PE
        """
        if len(datas) == 0:
            return False
        try:
            dos_signature = struct.unpack('H', datas[0:2])[0]
            if dos_signature != IMAGE_DOS_SIGNATURE:
                return False
            offset = struct.unpack('L', datas[60:64])[0]
            signature = struct.unpack('L', datas[offset:offset+4])[0]
            result = False
            if signature == IMAGE_NT_SIGNATURE:
                result = True
            else:
                signature = struct.unpack('H', datas[offset:offset+2])[0]
                if signature in (IMAGE_OS2_SIGNATURE, IMAGE_OS2_SIGNATURE_LE):
                    result = True
            return result
        except struct.error:
            return False
    
    def read(self, offset=None, size=None):
#        print 'request', self.req.get_full_url()
        return self._handler(offset, size)
    
    def http_handler(self, offset, size):
        isrange = False
        wantcode = 200
        if offset is not None and size is not None:
            isrange = True
            wantcode = 206
            r = '%d-%d' % (offset, offset+size-1)
            self.req.add_header('RANGE', 'bytes=%s' % r)
#        print self.req.headers
        try:
            response = urlopen(self.req)
        except URLError, e:
            if hasattr(e, 'reason'):
                print 'We failed to reach a server.'
                print 'Reason: ', e.reason
            elif hasattr(e, 'code'):
#                print dir(e)
#                print e
#                print e.info()
                if e.code == 302:
                    self.realurl = e.geturl()
                    headers = self.req.headers
                    self.req = Request(self.realurl, headers=headers)
                    self.info = e.info()
                    print 'restart new url:', self.info.dict['location']
                    return self.read(offset, size)
                if e.code == 400:
                    info = urlparse(e.geturl())
                    self.realurl = '%s://%s%s' % (info[0], info[1], info[2].replace('//', '/'))
#                    print self.realurl
#                    headers = self.req.headers
#                    self.req = Request(self.realurl, headers=headers)
#                    self.info = e.info()
                    print 'restart new url:', self.realurl
                    return None
            print e.read()
            print e.geturl()
            raise e
        except IOError, e:
            print dir(e)
            print e.args
            print e
            raise e
        else:
            self.realurl = response.geturl()
            if self.realurl != self.url:
                headers = self.req.headers
                self.req = Request(self.realurl, headers=headers)
#                print 'set real url', self.realurl
#            print response
#            print dir(response)
#            print response
            
            self.status = response.code
            assert self.status == wantcode
            self.info = response.info()
#            print self.info
            if self.info['content-type'] == 'text/html': # 404被跳转了
                error404 = HTTPError(self.url, 404, 'Content-Type: text/html', 
                                self.req.headers, response.fp)
                print error404.info()
                raise error404
            if isrange:
                totalsize = int(self.info.dict['content-range'].split('/')[1])
            else:
                totalsize = int(self.info.dict['content-length'])
                size = totalsize
            modifiedtime = time.mktime(self.info.getdate('last-modified')) - time.timezone
            if not self.size:
                self.size = totalsize
                self.modifiedtime = modifiedtime
            else:
                assert self.modifiedtime == modifiedtime
                assert self.size == totalsize
#            assert self.info.dict['content-type'] == 'application/octet-stream'
            
#            assert len(buf) == size
            buf = response.read()
            return buf
        
    def ftp_handler(self, offset, size):
        pass
#        if self._ftp is None:
#            self._ftp = ftplib.FTP()
#        self
    
    SELECT_URL_BY_URL = 'SELECT count(*) FROM linkinfo WHERE url=%s'
    def is_url_dealwith(self):
        self.peurl_cursor.execute(self.SELECT_URL_BY_URL, self.url)
        return self.peurl_cursor.fetchone()[0] > 0
        
    def check(self):
        """检测url是否pe，文件大小，切片是否重复
        
        'ispe': True/False,
        'type': int , coulb be: 99:PE, other see FileType
        'size': int,
        'slice_exist': True/False
        """
        print '----------------------- A Slice -----------------------------------'
        self.a_buf = self.read(0, 4096)
        self.ispe = self.is_pe(self.a_buf)
        if self.ispe:
            self.type = FileType.PE
        else:
            self.type = fileutil.get_type(self.a_buf)
        print 'type', self.type
        if self.size < 10240:
            print 'less than 10KB, %d' % self.size
            self.smallfile = True
            return 
        self.smallfile = False
        if self.isftp: # if is ftp url, the whole file has been down... in a_buf
            return 
        self.a_md5 = md5.new(self.a_buf).hexdigest()
        
        print '----------------------- B Slice -----------------------------------'
        self.b_offset = self._split(self.size)
        self.b_buf = self.read(self.b_offset, 2048)
        self.b_md5 = md5.new(self.b_buf).hexdigest()
        
        print '----------------------- C Slice -----------------------------------'
        self.c_offset = self.size - 4096
        self.c_buf = self.read(self.c_offset, 4096)
        self.c_md5 = md5.new(self.c_buf).hexdigest()
        
        self.slice_md5 = md5.new('%s%s%s%d' \
                             % (self.a_md5, self.b_md5, self.c_md5, self.size)).hexdigest()
        
    def download(self, filepath):
        filepath = os.path.abspath(filepath)
        dirpath = os.path.split(filepath)[0]
        print os.path.split(filepath)
        fileutil.ensure_dir_exist(dirpath)
        fileobj = open(filepath, 'wb')
        if self.smallfile:
            if self.size <= 4096:
                fileobj.write(self.a_buf)
            else:
                fileobj.write(self.read())
        else:
            fileobj.write(self.a_buf)
            offset = 4096
            size = self.b_offset - offset
            fileobj.write(self.read(offset, size))
            fileobj.write(self.b_buf)
            offset = self.b_offset + 2048
            size = self.c_offset - offset
            fileobj.write(self.read(offset, size))
            fileobj.write(self.c_buf)
        fileobj.close()
        os.utime(filepath, (self.modifiedtime, self.modifiedtime))
    
    LIMIT_SIZE = 10 * (1024 ** 2)
    def terminate(self, filepath):
        """"""
        self.check()
        if self.ispe or self.type in (FileType.RAR, FileType.ZIP, FileType.CAB): #这些都是关注的文件
            if self.size <= self.LIMIT_SIZE:
                self.download(filepath)
            else:
                print 'larger than 10 MB'
        else:
            print 'Not pe or archive: %s' % self.type

def getxdownurl():
    url = 'http://www.xdowns.com/soft/xdowns.asp?softid=%d&downid=33&id=%d'
#    url = 'http://www.zhaodll.com/dll/download.asp?softid=25188&downid=2&id=25219'
    root = 'E:/pes/xdowns_com/'
    listfile = open(os.path.join(root, 'xdowns_list_23001-42999.lst'), 'a')
    if not os.path.exists(root):
        os.mkdir(root)
    for id in range(23001, 43000):
        term = Terminator(url % (id, id))
#        term = Terminator(url)
        try:
            term.read(0, 2)
            print id, term.realurl
            listfile.write(term.realurl + os.linesep)
        except Exception, e:
            print e
    listfile.close()

# Script entry point: run the xdowns.com crawl when executed directly.
if __name__ == '__main__':
    getxdownurl()