#!/usr/bin/env python
#coding=utf-8
import socket,os,sys
socket.setdefaulttimeout(25)

import httplib2
import socks

import urlparse, urllib, urllib2
import re, pickle
import cookielib

#from gae_env import *
#from article import *

# Base URL of the campus CNKI mirror; relative links from scraped pages are
# resolved against this (see abpath()).
host = 'http://10.15.61.247'

# Outbound HTTP proxy every request is tunnelled through.
proxy_host = 'proxy222.9966.org'
proxy_port = 3128
http_proxy = 'http://proxy222.9966.org:3128'
# Browser-like User-Agent so the site serves normal pages instead of
# rejecting the scraper.
user_agent = 'Mozilla/5.0 (X11; U; Linux i686; zh-CN; rv:1.9.0.10) Gecko/2009042523 Ubuntu/9.04 (jaunty) Firefox/3.0.10'

#article_path = '/Users/young_jay/article'
# Directory where downloaded article files are stored (created on demand).
article_path = '/home/jay/article'

def abpath(p):
    """Return the absolute URL for a path relative to the mirror's root."""
    absolute = urlparse.urljoin(host, p)
    return absolute
    
class TooManyArticles_Exp(Exception):
    """Raised when a title search yields more results than expected.

    Attributes:
        n       -- number of results reported by the site (or None).
        keyword -- the search term that produced them (or None).
    """

    def __init__(self, n=None, keyword=None):
        self.keyword = keyword
        self.n = n

    def __str__(self):
        # Base message kept identical to the historical one; the stored
        # count/keyword (previously ignored) are appended when known so the
        # message is actually actionable.
        msg = "Too Many Search Results."
        if self.n is not None:
            msg += " (%d results" % self.n
            if self.keyword is not None:
                msg += " for %r" % self.keyword
            msg += ")"
        return msg
        
def parse_search_result(page):
    """Extract the detail-page link of the first search hit and return it
    as an absolute URL.

    The result count ("共有记录N条") is parsed as well; the too-many-results
    check that once used it is currently disabled, so the count is unused.
    """
    count_pat = re.compile(r'共有记录(\d+)条')
    n = int(count_pat.search(page).group(1))

    # First link to an article detail page in the result list.
    link_pat = re.compile(r'<a href="(detail.aspx\?QueryID=.+?)"')
    detail = link_pat.search(page).group(1)

    return abpath('kns50/%s' % detail)

def parse_article(page):
    """Scrape the two download links (CAJ and PDF, in that order) from an
    article detail page and return them as absolute URLs.

    Assumes the page contains exactly two download.aspx links.
    """
    link_pat = re.compile(r"<a href='(/KNS50/download.aspx\?filename=.+?)'")
    links = [abpath(u) for u in link_pat.findall(page)]
    caj, pdf = links
    return caj, pdf
    

# Search endpoint (CJFD journal database); the POST body template for it
# lives in body.txt.
url = 'http://10.15.61.247/kns50/Brief.aspx?ID=CJFD&classtype=&systemno=&NaviDatabaseName=&NaviField='
class ArticleGeter(object):
    """Searches the CNKI mirror for an article by title (through the HTTP
    proxy) and downloads its PDF into self.fpath.

    self.is_success is set to True only when a plausibly complete file
    (>100KB) was written; callers inspect it after get_article().
    """

    def __init__(self, title, fpath=article_path):
        self.title = title
        self.fpath = fpath

        # Flipped to True by _download_article on a successful download.
        self.is_success = False

        self.opener = self._build_opener()

        self._add_opener_header('user-agent', user_agent)

    def _add_opener_header(self, k, v):
        """Set header k to v on the opener, replacing any existing header
        with the same (case-insensitive) name so duplicates are never sent."""
        i = 0
        for name, _value in self.opener.addheaders:
            if name.lower() == k.lower():
                break
            i += 1
        if i < len(self.opener.addheaders):
            del self.opener.addheaders[i]
        self.opener.addheaders.append((k, v))

    def _build_opener(self, proxy=http_proxy):
        """Build a urllib2 opener that routes through the HTTP proxy and
        keeps cookies across requests (the site is session-based)."""
        proxy_support = urllib2.ProxyHandler({"http": proxy})
        cookie_support = urllib2.HTTPCookieProcessor()
        # debuglevel=1 dumps request/response lines to stdout for debugging.
        opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=1),
                                      proxy_support, cookie_support)
        return opener

    def _search(self):
        """POST the search form (template in body.txt, with 'search_word'
        substituted by the quoted title) and return the absolute URL of the
        first result's detail page."""
        body_file = open('body.txt')
        try:
            body = body_file.read()
        finally:
            body_file.close()
        body = body.replace('search_word', urllib.quote(self.title))

        req = urllib2.Request(url, data=body)
        f = self.opener.open(req)
        resp, content = f.info(), f.read()

        link = parse_search_result(content)
        # The site validates the Referer on the detail/download pages.
        self._add_opener_header('Referer', link)
        return link

    def _get_article_page(self):
        """Fetch the detail page of the first search hit.

        Returns (headers, html)."""
        f = self.opener.open(self._search())
        return f.info(), f.read()

    def _download_article(self, url):
        """Download the file at url into self.fpath.

        The server-supplied filename (Content-Disposition header, GBK
        encoded) is re-encoded to UTF-8.  Returns silently when no
        Content-Disposition header is present, i.e. the server did not
        actually serve a file."""
        f = self.opener.open(url)

        try:
            fn = unicode(f.info().dict['content-disposition'].split('=')[1],
                         'gbk').encode('u8')
        except KeyError:
            # No attachment header -> the response is not a file download.
            return

        if not os.path.exists(self.fpath):
            os.makedirs(self.fpath)
        fn = os.path.join(self.fpath, fn)
        # 'wb': the payload is binary (PDF/CAJ); text mode would corrupt it.
        out = open(fn, 'wb')
        try:
            out.write(f.read())
        finally:
            out.close()
        # Heuristic: anything under ~100KB is an error page, not the article.
        if os.path.getsize(fn) > 100000:
            self.is_success = True

    def get_article(self):
        """Search for the title, pick the PDF link off the detail page and
        download it."""
        resp, page = self._get_article_page()
        caj, pdf = parse_article(page)
        self._download_article(pdf)

if __name__ == '__main__':
    # Usage: script.py <article title>; a non-zero exit status signals that
    # the download did not complete.
    getter = ArticleGeter(title=sys.argv[1])
    getter.get_article()
    if not getter.is_success:
        sys.exit(1)
