# -*- coding: utf-8 -*-
import re
import os
from core import opener, del_task
from upload import upload
from var import *

# JavaScript alert messages (UTF-8 literals) that mark an article as
# permanently unavailable (confidential content / anonymous users barred).
# check_file() compares against these after re-encoding to GBK and deletes
# the task on a match.  NOTE: these strings are runtime data - do not edit.
jswarnings = ('对不起,文件涉及保密内容,不提供下载!给您造成不便请谅解!',
              '对不起,文件涉及保密内容,暂时不提供下载!给您造成不便请谅解!',
              '对不起，您还没有登录或登录后太长时间没有操作连接已经超时。匿名用户没有下载权限，请先登录后再进行下载！',
    )

# Database ids whose PDF link can be derived from the article URL alone
# (handled by get_pdflink_fromurl).  All other ids require fetching the
# landing page and scraping it (get_pdflink_frompage).
dbs_pdflink_fromurl = ('30','10','82','45','17','226','101','279','261','47',
    '205','89','286','58','54','227',)

def get_pdflink_fromurl(db, url):
    """Derive the direct PDF download link from an article's landing URL.

    Works purely by string rewriting, without fetching any page.  Only the
    database ids listed in ``dbs_pdflink_fromurl`` are supported here.

    Parameters:
        db  -- database id as a string (e.g. '30' for JSTOR).
        url -- the article's landing-page URL.

    Returns the PDF URL, or '' when the id is unknown or the URL does not
    have the expected shape.
    """
    link = ''
    # JSTOR: .../stable/<id> -> .../stable/pdfplus/<id>.pdf
    if db == '30':
        # BUGFIX: the original tested "'stable' in link" while link was
        # still '', so this branch could never fire; test url instead.
        if 'stable' in url:
            link = url.replace('stable', 'stable/pdfplus') + '.pdf?acceptTC=true'
    # SpringerLink
    elif db == '10':
        link = url + 'fulltext.pdf'
    # Oxford Journals
    elif db == '82':
        link = url.replace('.extract', '.full.pdf').replace('.abstract', '.full.pdf')
    elif db == '45':
        link = url.replace('/abs/', '/pdf/')
    elif db == '17':
        link = url.replace('abstract', 'full.pdf')
    elif db == '226':
        link = url.replace('simplequerydetail', 'download')
    # RSC
    elif db in ('101', '279', '261'):
        link = url.replace('articlelanding', 'articlepdf').replace('ArticleLanding', 'articlepdf')
    # IOP: append /pdf/<last four path segments joined by '_'>.pdf
    elif db == '47':
        para = url.split('/')[-4:]
        link = url + '/pdf/' + '_'.join(para) + '.pdf'
    # Portland Press journals
    elif db == '205':
        domains = ['www.bioscirep.org', 'www.biochemj.org']
        for d in domains:
            parts = re.findall(r"%s/(.*)/(.*)/(.*)\.htm" % d, url)
            if parts:
                parts = parts[0]
                link = "http://%s/%s/%s/%s/%s.pdf" \
                    % (d, parts[0], parts[1],
                       parts[2].replace(parts[0], '').replace(parts[1], ''),
                       parts[2].replace(parts[0], ''))
                break
    elif db == '89':
        link = url.replace('.short', '.full.pdf')
    elif db == '286':
        # No URL-based rule known for this database; caller gets ''.
        pass
    # Nature
    elif db == '58':
        link = url.replace('/abs/', '/pdf/').replace('.html', '.pdf')
    elif db == '54':
        link = url.replace('.abstract', '.full.pdf')
    # Bioline, e.g. http://www.bioline.org.br/abstract?id=pr10047&lang=en
    elif db == '227':
        # Take everything between 'id=' and the next '&' (or end of string);
        # the original sliced up to url.index('&') and crashed when no '&'
        # followed the id parameter.
        article_id = url.split('id=')[1].split('&')[0]
        link = 'http://www.bioline.org.br/pdf?' + article_id
    return link

def get_pdflink_frompage(db, page):
    """Scrape a PDF download link out of a fetched landing page.

    Tries each regex registered for this database id in ``allpatterns``
    (from var.py) in order; the first pattern that matches wins.  Some
    databases need extra post-processing of the matched fragment.

    Parameters:
        db   -- database id as a string.
        page -- raw body of the landing page.

    Returns the PDF URL, or '' when no pattern matched.
    """
    for i, pattern in enumerate(allpatterns[db]):
        found = re.findall(pattern, page, re.DOTALL)
        if not found:
            continue
        link = found[0].replace('\n', '').replace('\r', '')
        if db == '24':
            if i == 0:  # CNKI epub mirror: pattern yields a relative path
                link = 'http://epub.cnki.net' + link
            if i == 1:  # CNKI kcms mirror: strip HTML entities from the query
                link = 'http://www.cnki.net/kcms/' + \
                       link[link.index("download.aspx"):].replace('amp;', '').replace('&#xA;', '')
        if db == '2':
            if i == 0:  # CNKI dlib mirror
                link = "http://dlib.edu.cnki.net" + link
            if i == 1:  # Wanfang: the real link sits inside an iframe
                try:
                    link = "http://f.g.wanfangdata.com.cn/" + \
                           re.findall("<iframe src=\"(.*?)\"", opener.open(link).read())[0]
                except Exception:
                    pass  # best effort: fall back to the iframe page URL
        if db == '1':  # CNKI dissertations (.nh files)
            link = 'http://epub.cnki.net' + link
        return link
    # No pattern matched.  BUGFIX: always return a string here - the
    # original could leak re.findall's empty list as the return value.
    return ''

def get_pdflink(db, dblink):
    """Resolve a task's database link to a direct PDF URL.

    Opens ``dblink`` (following redirects) and either derives the PDF URL
    from the final URL alone (see dbs_pdflink_fromurl) or downloads the
    page and scrapes it.

    Parameters:
        db     -- database id as a string.
        dblink -- the task's entry URL for this database.

    Returns the PDF URL, or '' when nothing could be resolved.
    """
    request = opener.open(dblink)
    url = request.geturl()  # final URL after any redirects
    if db in dbs_pdflink_fromurl:
        return get_pdflink_fromurl(db, url)
    # Wiley: the PDF viewer page lives at .../pdf instead of .../abstract
    if db == '46':
        url = url.replace('/abstract', '/pdf')
        request = opener.open(url)
    # IEEE: the PDF is served through the stamp.jsp wrapper page
    if db == '13':
        arnumber = re.search(r'arnumber=(\d+)', url).group(1)
        url = 'http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=' + arnumber
        request = opener.open(url)
    return get_pdflink_frompage(db, request.read())

def download(article):
    """Try each of the article's database links in turn: resolve a PDF
    link, download the file, and validate it.

    Side effects: writes the file under pdf\\, may re-queue the article on
    download_queue for a retry, and deletes the task when no link works.

    Parameter:
        article -- task object; reads .name, .dblink, .id and uses
                   .filepath / .download_time as scratch state.
    """
    #print article.dbtype
    title = article.name
    all_dbs = article.dblink
    filepath = "pdf\\"+title+'.pdf'
    found = False
    for dblink in all_dbs:
        found = False
        #article.download_time = 0
        # The database id is embedded in the link as ...&type=<id>&...
        db = re.search('&type=(\d+)&',dblink).groups()[0]
        link = get_pdflink(db,dblink)
        if link:
            found = True
            if db == '1':
                # CNKI dissertations are .nh files, not .pdf
                filepath = filepath[:-4]+'.nh'
            with lock:
                print ">>>>>>>>>>>>begin downloading %s<<<<<<<<<<<<" % article.id
            if download_file(link,filepath):   # download succeeded
                article.filepath = filepath
                upload_ready = check_file(article)
                with lock:
                    if  upload_ready == 1:
                        print ">>>>>>>>>>>>download %s ok<<<<<<<<<<<<" % article.id
                        return
                        #upload(article)
                    if upload_ready == 0:
                        # File looked wrong - put the task back on the queue.
                        download_queue.put(article)
                        print ">>>>>>>>>retry to download %s<<<<<<<<<" % article.id
                        return
                    if upload_ready == -1:
                        article.download_time = 0
                        print ">>>>>>>>>>download %s failure<<<<<<<<<" % article.id
            #else:
            #    download_file(link,filepath)  # retry the download once
    if not found:
        # No download link found: drop the task.
        # NOTE(review): 'found' is reset at the top of each loop iteration,
        # so it only reflects the LAST link tried - a task whose earlier
        # link was found but failed to download is still deleted here.
        # Confirm this is intended.
        print '%s not found' % article.id
        del_task(article.id)
        return

def check_file(article):
    """Validate the downloaded file and decide the next action.

    Increments article.download_time, then inspects the file at
    article.filepath.

    Returns:
         1 -- file looks like a real document (success)
         0 -- file looks wrong, retry the download
        -1 -- article cannot be downloaded; the task is deleted and the
              bogus file removed (for known "forbidden" alerts)
    """
    article.download_time += 1
    # Anything over 200 KB is assumed to be a genuine document.
    if os.path.getsize(article.filepath) > 1024*200:
        return 1
    contents = open(article.filepath,'rb').read()
    # An HTML document instead of a PDF means the download was refused.
    htmlmark = re.findall(r'<!DOCTYPE html .*?>',contents)
    if htmlmark:
        if article.download_time > 2:
            return -1       # give up after 3 attempts
        return 0
    # No HTML marker: look for a javascript alert (CNKI serves these).
    jsmark = re.findall(r"<script language=javascript.*?alert\('(.*?)'\)",contents,re.DOTALL)
    if jsmark:
        # CNKI pages are GBK-encoded; re-encode our UTF-8 literals to match.
        mark = jsmark[0].decode('utf-8').encode('gbk').strip()
        if mark == '对不起，您的操作太过频繁。'.decode('utf-8').encode('gbk'):
            print(mark)     # throttled - just retry
            return 0
        for jswarning in jswarnings:
            if mark == jswarning.decode('utf-8').encode('gbk'):
                # Permanently unavailable: drop the task and the bogus file.
                del_task(article.id)
                os.remove(article.filepath)
                return -1
        # BUGFIX: an unrecognised alert used to fall off the end and
        # return None, which the caller silently ignored; apply the same
        # retry/give-up policy as the HTML case instead.
        if article.download_time > 2:
            return -1
        return 0
    return 1


def download_file(url, filepath):
    """Download ``url`` and save the response body to ``filepath``.

    Returns True on success, False when fetching the URL failed for any
    reason (network error, HTTP error, ...).  Best effort by design: the
    caller decides whether to retry.
    """
    try:
        cnt = opener.open(url).read()
    except Exception:
        # Deliberate broad catch: any fetch failure just means "not
        # downloaded".  (The original bound the exception to an unused
        # variable with Python-2-only 'except Exception,e' syntax.)
        return False
    # Context manager guarantees the handle is closed even if the write
    # fails; the original leaked the file object on error.
    with open(filepath, 'wb') as out:
        out.write(cnt)
    return True
