# -*- coding:utf-8 -*-
'''
Created on 2012-7-17

@author: duoyi
'''
import hashlib,MySQLdb
import struct,urlparse,os,urllib2,cookielib,re,urllib
import settings

'''根据网址获取网页的md5值'''
#前13位为段地址，11位为段偏移地址，最后5位为整型地址
WEISHU=29
SEG=13
SEG_OFFSET=11
OFFSET_VALUE=5
def get_full_md5(url):
    m=hashlib.md5()
    m.update(url)
    b_m=m.digest()
    return b_m
def md52int(b_m,weishu=WEISHU):
    b_length=len(b_m)
    l_m=0L
    for i in xrange(b_length):
        b_quan=2**(8*i)
        b_int=struct.unpack('B',b_m[i])[0]
        l_m+=b_int*b_quan
    return l_m%(2**weishu)
def get_md52int(url):
    return md52int(get_full_md5(url))

def _get_seg_index(l_md5):
    return l_md5%(2**SEG)
def _get_seg_offset(l_md5):
    return (l_md5>>SEG)%(2**SEG_OFFSET)
def _get_seg_offset_value(l_md5):
    return l_md5>>(SEG+SEG_OFFSET)
def get_bit_size(l_md5):
    return _get_seg_index(l_md5),_get_seg_offset(l_md5),_get_seg_offset_value(l_md5)

def get_url_ext(url):
    '''获取后缀名'''
    sub_path=urlparse.urlparse(url)[2]
    ext=os.path.splitext(sub_path)[1][1:]
    ext=ext.split('@')[0]
    ext=ext.split('?')[0]
    return ext.lower()

def decode_data(data):
    '''Decode byte string *data* with the first codec in settings.S_decode_list that works.

    Returns (unicode_text, codec_name) on success, or (u'', '') when every
    codec fails.  The previous bare ``except`` swallowed *all* exceptions
    (including programming errors); only decoding failures are expected here,
    so we catch just those.
    '''
    for coding in settings.S_decode_list:
        try:
            return data.decode(coding), coding
        except (UnicodeError, LookupError):
            # UnicodeError: bytes invalid for this codec.
            # LookupError: unknown codec name listed in settings.
            continue
    return u'', ''
def get_re_patt(str1):
    '''Escape regex metacharacters in str1 so it can be matched literally.

    Bug fix: the backslash itself is now escaped too, and it is escaped
    FIRST so the backslashes we add for the other metacharacters are not
    doubled afterwards.  Output is unchanged for inputs containing no
    backslash.
    '''
    for meta in '\\*^$+?[]{}|().':
        str1 = str1.replace(meta, '\\' + meta)
    return str1
def url2domain(url):
    url=url.strip()
    domain=urlparse.urlparse(url)[1]
    return domain
    
def split_path(path, split_len=4):
    '''Spread *path* across nested one-character directory levels.

    The first *split_len* characters of the decoded path (skipping '/'
    and '.') each become a directory level, followed by '/' + the
    original path.  Returns a utf-8 encoded byte string.
    '''
    decoded = decode_data(path)[0]
    # keep at most split_len "significant" characters as directory names
    level_chars = [c for c in decoded if c not in u'/.'][:split_len]
    pieces = [u'']
    pieces.extend(level_chars)
    nested = u'/'.join(pieces)
    nested += u'/' + path
    return nested.encode('utf-8', 'ignore')
def url2path(url):
    '''Map a URL to a filesystem-style storage path.'''
    url = url.lower()
    # bare domains like http://www.csdn.net get a trailing slash appended
    if len(re.findall('/', url)) <= 2:
        url += '/'
    parsed = urlparse.urlparse(url)
    combined = parsed[1] + parsed[2]
    suffix = os.path.basename(combined)
    # every intermediate path component is fanned out via split_path
    parts = re.findall('([^/]+)/', combined)
    path = ''.join(split_path(part) for part in parts)
    path = path + '/' + suffix
    return path.strip()

def login_website():
    '''csdn'''
    cook_jar=cookielib.CookieJar()
    cookie_support=urllib2.HTTPCookieProcessor(cook_jar)
    opener=urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    print 'logging'
    
    login_url='http://passport.csdn.net/ajax/accounthandler.ashx?t=log&u=dylinshi&p=123456a&remember=0&f=http%3A%2F%2Fblog.csdn.net%2F&rand=0.363029723724382'
    user_agents = [
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'Opera/9.25 (Windows NT 5.1; U; en)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
            'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
            ]
    headers={
            'User-Agent':user_agents[0],
            'Referer':settings.S_start_urls[0]
            }
    req=urllib2.Request(url=login_url,headers=headers)
    res = urllib2.urlopen(req)
    
    print 'code is :'+str(res.code)
    if res.code<=200:
        print 'login %s success'%settings.S_target_website
    else:
        print 'login %s fail'%settings.S_target_website
        print cook_jar._cookies
    return res

def get_login_data():
    '''Fetch iteye's login page and extract the CSRF token.

    Returns {'authenticity_token': value} when the hidden form input is
    found, otherwise an empty dict.
    '''
    url='http://www.iteye.com/login'
    headers = {
            'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'Referer': 'http://www.iteye.com/',
            }
    page = urllib2.urlopen(urllib2.Request(url=url, headers=headers)).read()

    token_key = 'authenticity_token'
    result = {}
    # scan every <input ...> tag for the token field and grab its value
    for tag in re.findall('<input [\s\S]+?>', page):
        if token_key not in tag:
            continue
        match = re.search('value="([\s\S]+?)"', tag)
        if match:
            result[token_key] = match.group(1)
    return result
def login_website():
    '''Log into iteye with a hard-coded test account.

    NOTE(review): this redefines the csdn login_website above and is
    itself shadowed by the definitions below; only the last definition
    of login_website is callable after import.
    Installs a cookie-aware opener globally via urllib2.install_opener.
    '''
    cook_jar=cookielib.CookieJar()
    cookie_support=urllib2.HTTPCookieProcessor(cook_jar)
    opener=urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
    urllib2.install_opener(opener)

    # Scrape the CSRF token ('authenticity_token') from the login form first.
    login_extra_dic=get_login_data()
    print login_extra_dic
    print 'logging'
    
    login_url='http://www.iteye.com/login'
    user_agents = [
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'Opera/9.25 (Windows NT 5.1; U; en)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
            'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
            ]
    # 'button' carries a mojibake-encoded submit label; bytes kept as-is
    # because the server expects this exact value.
    login_dic={'name':'dylinshi126',
               'password':'123456a',
               'remember_me':'1',
               'button':'ç»ãå½',
               }
    login_dic.update(login_extra_dic)
    post_data=urllib.urlencode(login_dic)
    headers={
            'User-Agent':user_agents[0],
            'Referer':'http://www.iteye.com/login'
            }
    req=urllib2.Request(url=login_url,data=post_data,headers=headers)
    res = urllib2.urlopen(req)
    
    # NOTE(review): <=200 also treats 1xx codes as success; probably meant ==200.
    print 'code is :'+str(res.code)
    if res.code<=200:
        print 'login %s success'%settings.S_target_website
    else:
        print 'login %s fail'%settings.S_target_website
    print cook_jar._cookies
    return res

def login_website():
    '''Log into cnblogs with a hard-coded test account.

    NOTE(review): shadowed by the 51cto login_website defined below;
    this definition is unreachable after normal import.
    Installs a cookie-aware opener globally via urllib2.install_opener.
    '''
    cook_jar=cookielib.CookieJar()
    cookie_support=urllib2.HTTPCookieProcessor(cook_jar)
    opener=urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    print 'logging'
    
    login_url='http://passport.cnblogs.com/login.aspx?ReturnUrl=http%3a%2f%2fwww.cnblogs.com%2f'
    user_agents = [
            'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
            'Opera/9.25 (Windows NT 5.1; U; en)',
            'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
            'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
            'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
            'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9'
            ]
    # ASP.NET form post: __VIEWSTATE/__EVENTVALIDATION appear to be tokens
    # captured from a live login page; btnLogin carries a mojibake submit
    # label.  All values are kept byte-for-byte.
    post_data=urllib.urlencode({'__EVENTTARGET':'',
                           '__EVENTARGUMENT':'',
                           '__VIEWSTATE':'/wEPDwULLTE1MzYzODg2NzZkGAEFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYBBQtjaGtSZW1lbWJlcm1QYDyKKI9af4b67Mzq2xFaL9Bt',
                           '__EVENTVALIDATION':'/wEWBQLWwpqPDQLyj/OQAgK3jsrkBALR55GJDgKC3IeGDE1m7t2mGlasoP1Hd9hLaFoI2G05',
                           'tbUserName':'dylinshi',
                           'tbPassword':'123456a',
                            'btnLogin':'ç»  å½',
                            'txtReturnUrl':'http://www.cnblogs.com/',
                           })
    headers={
            'User-Agent':user_agents[0],
            'Referer':settings.S_start_urls[0]
            }
    req=urllib2.Request(url=login_url,data=post_data,headers=headers)
    res = urllib2.urlopen(req)
    
    # NOTE(review): <=200 also treats 1xx codes as success; probably meant ==200.
    print 'code is :'+str(res.code)
    if res.code<=200:
        print 'login %s success'%settings.S_target_website
    else:
        print 'login %s fail'%settings.S_target_website
        print cook_jar._cookies
    return res
def login_website():
    '''51cto'''
    proxy_support = urllib2.ProxyHandler({'http':'127.0.0.1:8086'})

    cook_jar=cookielib.CookieJar()
    cookie_support=urllib2.HTTPCookieProcessor(cook_jar)
    opener=urllib2.build_opener(proxy_support,cookie_support,urllib2.HTTPHandler)
    urllib2.install_opener(opener)

    #cook_jar=cookielib.CookieJar()
    #cookie_support=urllib2.HTTPCookieProcessor(cook_jar)
    #opener=urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
    #urllib2.install_opener(opener)
    
    print 'logging'
    login_url='http://home.51cto.com/index.php?s=/Index/doLogin'
    user_agents = ['Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1',]
    post_data=urllib.urlencode({'email':'duoylinshi@163.com',
                               'passwd':'123456a',
                               'autologin':'on',
                               'reback':'http%3A%2F%2Fwww.51cto.com%2F',
                               'button.x':36,
                               'button.y':17,
                               })
    headers={
            'User-Agent':user_agents[0],
            'Referer':'http://home.51cto.com/index.php?s=/Index/index/reback/http%253A%252F%252Fwww.51cto.com%252F/'
            }
    req=urllib2.Request(url=login_url,data=post_data,headers=headers)
    res = urllib2.urlopen(req)
    print 'code is :'+str(res.code)
    if res.code<=200:
        print 'login success' 
    else:
        print 'login fail'
    print cook_jar._cookies
    
    login_after_action(res)
    
    return res
def login_after_action(res):
    '''51cto'''
    login_content=res.read()
    patt='src="([\s\S]+?)"'
    src_list=re.findall(patt,login_content)
    for src in src_list:
        url=src
        user_agents = ['Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
                ]
        headers={
                'User-Agent':user_agents[0],
                'Referer':'http://www.51cto.com/'
                }
        req=urllib2.Request(url=url,headers=headers)
        res = urllib2.urlopen(req)
    print 'login end'

def get_db_cxn():
    '''Open a MySQL connection to the local 'pachong' database, forced to utf8.'''
    connection = MySQLdb.connect(host='localhost', user='root',
                                 passwd='123456ms', db='pachong')
    connection.set_character_set('utf8')
    return connection

def check_is_allow_domain(url):
    url=url.lower().strip()
    #if [t_url for t_url in S_allow_domains if url.startswith(t_url)]:
    deny_str='S_%s_deny_domains'%settings.S_target_website
    allow_str='S_%s_allow_domains'%settings.S_target_website
    if hasattr(settings,deny_str):
        deny_domains=getattr(settings,deny_str)
    else:
        print 'settings do not have %s'%deny_str
        exit()
    if hasattr(settings,allow_str):
        allow_domains=getattr(settings,allow_str)
    else:
        print 'settings do not have %s'%allow_str
        exit()
    #print deny_domains,allow_domains
    if [d_url for d_url in allow_domains if (d_url.lower() in url)]:
        return True
    
    if [d_url for d_url in deny_domains if (d_url.lower() in url)]:
        return False        
    
    domain=url2domain(url)
    if '51cto.com' in domain:                                                      ##
        return True
    
    return False
def valid_url(res_url,url):
    '''Normalize a link found on page *res_url* and filter unwanted URLs.

    Returns the absolute, cleaned URL, or '' when the link should be
    skipped (javascript links, non-http schemes, extensions outside the
    white list, js files, or domains outside the allow rules).
    '''
    # drop the anchor fragment
    url=url.split('#')[0].strip()
    if url=='' or url.startswith('javascript'):
        return ''

    # resolve relative links against the page they came from
    if not url.startswith('http://'):
        url=urlparse.urljoin(res_url,url).strip()

    if not url.startswith('http://'):
        return ''

    if len(re.findall('/',url))==2:         # turn http://www.csdn.net into http://www.csdn.net/
        url=url+'/'

    sub_url=urlparse.urlparse(url)[2]       # turn http://host/../x into http://host/x
    if sub_url.startswith('/../'):
        url=urlparse.urljoin(res_url,sub_url[3:])
        sub_url=urlparse.urlparse(url)[2]
        if sub_url.startswith('/../'):
            return ''

    # extension filtering
    ext=get_url_ext(url)
    if ext!='' and (ext not in settings.S_white_ext):
        return ''
    if ext=='css':                          # css is always downloaded
        pass
    elif ext=='js':                         # js is never downloaded
        # BUGFIX: the old test "ext in ('css','js')" also let js through,
        # which made this branch unreachable; the original comments say
        # js must be skipped.
        url=''
    elif check_is_allow_domain(url):
        pass
    else:
        url=''
    return url
def create_tables(cur,cxn,table_name):
    '''Create the crawl table `table_name` and its `<table_name>_queue` companion.

    cur: live DB cursor used to run the DDL.
    cxn: connection object (unused; kept for interface compatibility).
    table_name: base name for both tables.

    CREATE failures (typically "table already exists") are reported and
    swallowed, matching the original best-effort behavior.  The duplicated
    try/execute/print logic is factored into one helper.
    '''
    def _try_create(sql, label):
        # best-effort CREATE: report either outcome, never raise
        try:
            cur.execute(sql)
            print('create table %s' % label)
        except Exception:
            print('table %s is existed' % label)

    _try_create('''
        CREATE TABLE `%s` (
          `id` bigint(20) NOT NULL,
          `url` varchar(400) NOT NULL,
          `update_time` char(50) NOT NULL,
          `file_length` int(11) NOT NULL,
          `update_fre` int(11) NOT NULL,
          PRIMARY KEY (`id`)
        )ENGINE=InnoDB DEFAULT CHARSET=utf8;
        ''' % table_name, table_name)

    _try_create('''
        CREATE TABLE `%s_queue` (
          `id` bigint(20) NOT NULL AUTO_INCREMENT,
          `url` varchar(400) NOT NULL,
          PRIMARY KEY (`id`)
        )ENGINE=InnoDB DEFAULT CHARSET=utf8;
        ''' % table_name, '%s_queue' % table_name)
        
if __name__=='__main__':
    # quick manual check of get_url_ext on a tricky URL
    sample = 'http://csdnimg.cn/www/css/main_new.css/fg/dsf.css@gfdgdfg'
    print(get_url_ext(sample))










