# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
import struct,urlparse,os,urllib2,cookielib,re,urllib,re,time,datetime,random,json
# Shared cookie jar + opener installed globally, so every urllib2 request
# made by this script carries the same session cookies.
cook_jar=cookielib.CookieJar()
cookie_support=urllib2.HTTPCookieProcessor(cook_jar)
opener=urllib2.build_opener(cookie_support,urllib2.HTTPHandler)
urllib2.install_opener(opener)
def get_url_data(url,postdata=None):
    '''Fetch *url* and return the raw response body (bytes).

    postdata -- optional dict of POST fields; when given (and non-empty)
    the request is sent as a POST, otherwise as a plain GET.

    Fixes: the original used a mutable default argument (postdata={})
    and leaked the response object when read() raised.
    '''
    headers={
        'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6',
        'Accept-Language':'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3',
        }
    if postdata:
        post_data=urllib.urlencode(postdata)
        req=urllib2.Request(url=url,data=post_data,headers=headers)
    else:
        req=urllib2.Request(url=url,headers=headers)
    res=urllib2.urlopen(req,timeout=30)
    try:
        return res.read()
    finally:
        # Always release the connection, even on a read error.
        res.close()

# Candidate encodings, tried in order; utf-8 first, then the Chinese
# GB family, then cp1252 as a catch-all.
coding_list=['utf-8', 'gb2312', 'gbk', 'gb18030', 'cp1252', ]
def decode_data(data):
    '''Decode *data* (a byte string) trying each encoding in coding_list.

    Returns the first successful unicode result, or u'' when none of the
    listed encodings can decode the input.

    Fix: the original bare `except:` also swallowed unrelated errors
    (AttributeError, KeyboardInterrupt, ...); only codec failures are
    caught now.
    '''
    for coding in coding_list:
        try:
            return data.decode(coding)
        except (UnicodeError, LookupError):
            # Wrong guess -- try the next candidate encoding.
            pass
    return u''
def on_get_elem(soup,field,href):
    '''Return the non-empty values of attribute *href* taken from every
    *field* tag found under *soup*, in document order.'''
    stripped=(tag.get(href,'').strip() for tag in soup.find_all(field))
    return [value for value in stripped if value]

# Matches any HTML/XML tag, non-greedily, across newlines.
re_tag=re.compile(r'<[\s\S]+?>')

def remove_sym(data):
    '''Strip whitespace and markup from *data* and decode it to unicode.

    Commas are replaced with spaces so the value can be embedded safely
    in the comma-separated rows this script emits.

    Fix: regex patterns are now raw strings (the originals relied on
    '\\s' surviving as a literal backslash escape).
    '''
    data=data.replace(' ','')
    data=re.sub(r'\s','',data)   # drop all remaining whitespace
    data=re_tag.sub('',data)     # drop embedded tags
    data=data.replace(',',' ')   # keep the CSV output unambiguous
    return decode_data(data.strip())
# Module-level scratch state shared across the scraping helpers:
# the province / city pair currently being processed (written by
# get_post_data).
p=''
c=''
def get_province(elem):
    '''Extract the province part of the element's address.

    Looks at the first four characters of the address and cuts just
    after the first administrative suffix found (省 / 市 / 区).

    Fix: get_addr returns unicode, so the suffix markers must be decoded
    too -- the original searched for raw byte-string literals, which
    raises UnicodeDecodeError on py2 for non-ASCII text. This now
    mirrors the handling already used in get_city.
    '''
    addr=get_addr(elem)[:4]
    for suffix in (decode_data('省'),decode_data('市'),decode_data('区')):
        i=addr.find(suffix)
        if i!=-1:
            return addr[:i+1]
    return ''
def get_city(elem):
    '''Extract the city part of the element's address.

    Skips the province prefix found by get_province, then cuts the
    remainder (up to index 8 of the full address) just after the first
    county/city/district suffix.
    '''
    full_addr=get_addr(elem)
    province=get_province(elem)
    rest=full_addr[len(province):8]
    for marker in (decode_data('县'),decode_data('市'),decode_data('区')):
        pos=rest.find(marker)
        if pos!=-1:
            return rest[:pos+1]
    return ''

def get_service(elem):
    '''Placeholder: no service description is available on this page.'''
    return ''

def get_site_name(elem):
    '''Return the cleaned store-name text, or '' when the marker span is
    missing from the element.'''
    marker=elem.find('span',attrs={'class':'nk-search-document-type'})
    if not marker:
        return ''
    # Drop the document-type label so it does not pollute the name text.
    marker.decompose()
    name_div=elem.find('div',attrs={'class':'nk-text-medium nk-generic-bold'})
    return remove_sym(str(name_div).strip())

def get_addr(elem):
    '''Return the cleaned address text: everything before the first <br/>
    inside the gray-text block, after removing any <b> children.'''
    for bold in elem.find_all('b'):
        bold.decompose()
    gray_block=elem.find('div',attrs={'class':'nk-block-gray-text'})
    first_segment=str(gray_block).split('<br/>')[0]
    return remove_sym(first_segment)

def get_contact(elem):
    '''Return phone (and fax, when present) parsed from the segments of
    the gray-text block split on <br/>.'''
    segments=str(elem.find('div',attrs={'class':'nk-block-gray-text'})).split('<br/>')
    if len(segments)>=3:
        contact='电话:'+segments[1]+'传真：'+segments[2]
    elif len(segments)>=2:
        contact='电话:'+segments[1]
    else:
        contact=''
    return remove_sym(contact)

def get_work_time(elem):
    '''Placeholder: working hours are not published on this page.'''
    return ''

pc_list=[]  # flat list of [province, city] pairs parsed from the dump file
def build_p_c():
    '''Populate pc_list from d:/tmp.txt.

    Each line is expected to look like
    "citylist.push( new Array('<province>','<city>',...));" -- the first
    quoted item is the province, every following item is one of its
    cities.

    Fix: the file handle is now closed via a with-block (the original
    never closed it).
    '''
    global pc_list
    with open('d:/tmp.txt') as f:
        for line in f:
            line=decode_data(line)
            # Cut off the JS wrapper: the push(...) prefix and the
            # trailing "));" plus newline.
            body=line[len('citylist.push( new Array('):-5]
            items=body.split(',')
            province=items[0]
            for city in items[1:]:
                # [1:-1] strips the surrounding single quotes.
                pc_list.append([province[1:-1],city.strip()[1:-1]])
#build_p_c()
##for i in pc_list:
##    print i[0],i[1]
##exit(0)
def get_post_data(soup):
    '''Pop the next [province, city] pair off pc_list and build the POST
    fields for it; returns None once the list is exhausted.

    Side effect: records the pair in the module-level p / c variables.
    (The *soup* argument is accepted but unused.)
    '''
    global p
    global c
    global pc_list
    if not pc_list:
        return None
    province,city=pc_list.pop()
    p=province
    c=city
    return {
        'CityIndex':0,
        'ProvinceIndex':0,
        'cityname':city,
        'selCity':city,
        'selProvince':province,
        }

def _get_elem(soup,field,_class='',_id=''):
    '''Find tags named *field*, optionally filtered by an exact class or
    id attribute; _class takes precedence over _id, and with neither set
    all *field* tags are returned.'''
    if _class:
        return soup.select('%s[class="%s"]'%(field,_class))
    if _id:
        return soup.select('%s[id="%s"]'%(field,_id))
    return soup.find_all(field)
had_get_set=set()
def get_elem(soup):
    global p
    global c
    global had_get_set
    rlist=[]
    table=_get_elem(soup,'div',_class="nk-t017-c")[0].select('a')
    #tables=soup.find('table')[1:]
    #print table
    
    for tr in table:
        if tr.select('li'):
            continue
        if not tr.select('div'):
            continue
        hashcode=hash(tr)
        if hashcode in had_get_set:
            continue
        else:
            had_get_set.add(hashcode)
        #print tr
        rstr=''
        rstr=rstr+get_province(tr)+','
        rstr=rstr+get_city(tr)+','
        rstr=rstr+get_service(tr)+','
        rstr=rstr+get_site_name(tr)+','
        rstr=rstr+get_addr(tr)+','
        rstr=rstr+get_contact(tr)+','
        rstr=rstr+get_work_time(tr)+','
        rlist.append(rstr)
        print rstr
    return rlist[:]

def parse(soup):
    '''Parse one result page into a list of comma-separated row strings.'''
    return get_elem(soup)

def save_data(data):
    '''Write the collected rows to the fixed output file.

    Fix: uses a with-block so the handle is closed even when write()
    raises (the original leaked the handle on failure).
    '''
    with open('d:/dp/nokia.txt','w') as f:
        f.write(data)
    
def main():
    '''Crawl the Nokia store-locator search for every province and write
    all collected rows to disk as comma-separated text.'''
    fdata=''  
    for i in range(0,1):
        # NOTE(review): this URL is immediately overwritten below and never
        # fetched -- presumably a leftover from an earlier scraper target.
        url='http://www.konicaminolta.com.cn/business/support/printer/service_repair/index.php'
        print i,url

        # Every province name queried against the store locator.
        plist=['安徽', '香港','澳门','台湾','北京', '重庆', '福建', '甘肃', '广东', '广西', '贵州', '海南', '河北', '河南', '黑龙江', '湖北', '湖南', '吉林', '江苏', '江西', '辽宁', '内蒙古', '宁夏', '青海', '山东', '山西', '陕西', '上海', '四川', '天津', '西藏', '新疆', '云南', '浙江']
        
        for p in plist:
            print (decode_data(p))
            # First result page for this province.
            url='http://www.nokia.com/cn-zh/store-locator/?qt=%s&action=storeSearch&country=CN&tags=Nokia_Care_Location'%(decode_data(p))
            udata=decode_data(get_url_data(url))
            #print udata
            soup=BeautifulSoup(str(udata),'lxml',from_encoding="utf-8")
            rlist=parse(soup)

            if len(rlist)==0:
                print 'error:',i,url
            for l in rlist:
                fdata=fdata+l+'\n'
                
            # Follow pagination: fetch each unique pager href exactly once.
            pagel=soup.find('div',attrs={'class':'nk-right nk-pagination nk-generic-bold'})
            if pagel:
                pas=pagel.find_all('a')
                hrefs=set()
                #print pas
                for u in pas:
                    href=u.get('href','')
                    print href
                    if href in hrefs:
                        continue
                    else:
                        hrefs.add(href)
                    if href:
                        url='http://www.nokia.com'+href
                        udata=decode_data(get_url_data(url))
                        #print udata
                        soup=BeautifulSoup(str(udata),'lxml',from_encoding="utf-8")
                        
                
                        rlist=parse(soup)

                        if len(rlist)==0:
                            print 'error:',i,url
                        for l in rlist:
                            fdata=fdata+l+'\n'
                        # Light throttle between page fetches.
                        time.sleep(0.2)
    print '-'*50
    print fdata
    save_data(fdata)
main()

print 'end'




















