# coding=utf-8

'''
Created on 2014-2-28

@author: Administrator
'''
from bs4 import BeautifulSoup
from com.swiftcode.core import Dbopt
import re
import requests
import string


 

def getUrl():
    """Build the list of country-index URLs, one per letter A-Z.

    Returns:
        list of str: URLs like "http://www.swiftbic.com/countries-with-A.html".
    """
    # ascii_uppercase is locale-independent and exists in both Python 2 and 3
    # (string.uppercase is Python-2-only and locale-dependent); a comprehension
    # replaces the manual index loop.
    urlPrefix = "http://www.swiftbic.com/countries-with-"
    return [urlPrefix + letter + ".html" for letter in string.ascii_uppercase]
# Given one country-index URL, return all country info found on that page.
# A model/entity class could represent each country, but for a small tool
# like this Python's built-in dict/tuple structures are sufficient.
def getCountries(url):
    countries = []
    url = "http://www.swiftbic.com/countries-with-C.html"
    content = getContentByUrl(url)
    soup = BeautifulSoup(content) 
    #print  soup.find_all("ul", "country")
    lis = soup.find("ul", "country")
    for li in lis.find_all("li",{'class':'page'}):
        print "国家名称是:%s,连接地址是:%s,图片地址是:%s" % (li.a.text,li.a["href"],li["style"])
    

def getContentByUrl(url):
    """GET *url* and return the decoded response body as text."""
    return requests.get(url).text
def getImageHref(imgstr):
    """Extract the image path from a CSS background declaration.

    Bug fix: the function used to ignore ``imgstr`` and match a hard-coded
    sample string, and it printed the result instead of returning it.

    Args:
        imgstr: CSS text such as
            "background: url(/img/flags/CM.png) 5px 0 no-repeat;".

    Returns:
        The path inside url(...), e.g. "/img/flags/CM.png", or None when
        the string contains no url(...) clause.
    """
    match = re.match(r'.* url\((.*)\)', imgstr)
    return match.group(1) if match else None
def downImg(filepah,imagepath): 
    print "Download Image File=", imagepath  
    r = requests.get(imagepath, stream=True) # here we need to set stream = True parameter  
    with open(filepah, 'wb') as f:  
        for chunk in r.iter_content(chunk_size=1024):  
            if chunk: # filter out keep-alive new chunks  
                f.write(chunk)  
                f.flush()  
        f.close()  
    
def getCountryUrls(url):
    url = "http://www.swiftbic.com/swift-code/country/CHINA.html"
    
    
    soup = BeautifulSoup(getContentByUrl(url),from_encoding = "utf-8")
    oddsToday = soup('table')[1]
    if(oddsToday == None):
        print "没有这一页"
    else:
        print "有数据"
def crawSwiftCode():
    """Crawl swift codes for every country row stored in the database."""
    for country in Dbopt.getCountriesPage():
        # TODO: clear any previously collected rows for this country first
        page_urls = getCountryPageUrls(country[3])
        for page_url in page_urls:
            country[2] = page_url
            crawCode(country)
        # TODO: mark this country as fully crawled afterwards

def crawCode(country):
    """Scrape every swift-code detail page linked from one country listing page.

    ``country`` is a DB-row-like sequence: country[2] holds the listing-page
    URL to crawl; country[0] and country[1] are passed through into each
    saved row (presumably the country's id and name -- TODO confirm against
    Dbopt.getCountriesPage).  All scraped rows are written in one batch via
    Dbopt.batchCodes.
    """
    soup = BeautifulSoup(getContentByUrl(country[2]),from_encoding = "utf-8")
    site = "http://www.swiftbic.com"
    values =[]
    # The second <table> on the listing page holds the bank rows.
    oddsToday = soup('table')[1]
    for tr in oddsToday.find_all('tr'): 
        tds = tr.find_all('td')
        # Third column carries the relative link to the bank's detail page.
        td = tds[2] 
        text = site + td.a["href"]
        # NOTE: `soup` is rebound to the detail page here; the outer loop is
        # unaffected because it iterates the tr list captured above.
        soup = BeautifulSoup(getContentByUrl(text),from_encoding = "utf-8")
        # A warning box on the detail page is recorded as tip flag 1, else 0.
        tip = soup.find("div", {'class':'alert-boxy boxy-warning'})
        if(tip == None):
            tip = 0
        else:
            tip = 1
        
        # Fixed row positions in the detail table -- assumes the site layout
        # never changes; TODO confirm rows 0/2/3/5/6/7 are still correct.
        codetable = soup('table')[1]
        s_trs = codetable.find_all('tr')
        swiftcode = s_trs[0].find_all('td')[1].text.strip()
        bankname = s_trs[2].find_all('td')[1].text.strip()
        bankbranch = s_trs[3].find_all('td')[1].text.strip()
        city = s_trs[5].find_all('td')[1].text.strip()
        address = s_trs[6].find_all('td')[1].text.strip()
        location = s_trs[7].find_all('td')[1].text.strip() 
        values.append((swiftcode,bankname,bankbranch,city,address,location,tip,country[0],country[1]))
    print "URL:%s 有%s条数据" % (country[2],len(values))
    Dbopt.batchCodes(values) 
    
def getCountryPageUrls(url):
    """Expand one country listing URL into all of its pagination URLs.

    Page 1 is *url* itself; subsequent pages follow the pattern
    "<base>-<n>.html" where <base> is *url* minus its ".html" suffix and
    n runs from 2 up to (but excluding) the page count.
    """
    total = getPageNum(url)
    base = url[:len(url) - 5]  # strip the trailing ".html"
    # range(2, total) is empty when total is 0 or 1, so no guard is needed.
    extra = [base + "-" + str(page) + ".html" for page in range(2, total)]
    return [url] + extra
    
def getPageNum(url):
    """Return the number of pagination links found on *url*'s page (0 if none).

    The pagination links live as <a> tags inside the first <center> element.
    """
    soup = BeautifulSoup(getContentByUrl(url), from_encoding="utf-8")
    centers = soup('center')
    if not centers:
        return 0
    return len(centers[0].find_all("a"))
if  __name__ == '__main__':
#    print  getCountries("kjkj")
  #   print  getImageHref("kjkj")
    #downImg("E:\\aa.png","http://www.swiftbic.com/img/flags/CM.png"
    #url = "http://www.swiftbic.com/swift-code-AFABAFKA.html"
    url = "http://www.swiftbic.com/swift-code/country/CHINA.html"
    #url = "http://www.swiftbic.com/swift-code/country/AFGHANISTAN.html"
    results=[("98","ANDORRA","http://www.swiftbic.com/swift-code/country/ANDORRA.html"),(99,"UNITED ARAB EMIRATES","http://www.swiftbic.com//swift-code/country/UNITED-ARAB-EMIRATES.html")] 
    for country in results:
        for url in getCountryPageUrls(country[2]):
            country = list(country)
            country[2] = url
            print "正在采集%s ..." % (url)
            crawCode(country)
            
    