#! /usr/bin/env python
#-*- encoding:UTF-8 -*-

'''
    @author: yj
    @desc lbs数据处理
    @date 2013-11-12
'''
import re

import urllib
import urllib2
import hashlib
import time
import pymongo
import traceback

from django.utils import simplejson


# District name -> dianping region id (Hangzhou districts/counties).
dic_region={'上城区':58,'下城区':59,'江干区':61,'萧山区':6446,'西湖区':62,'拱墅区':60,'滨江区':63,'余杭区':8845,'淳安县':9164}

    
# MongoDB connections.  All six handles point at the same server; the
# commented blocks above/below are alternative environments kept for
# quick switching.  NOTE(review): one shared client would suffice.
# con = pymongo.MongoClient('192.168.100.23',25017)
# con_city = pymongo.MongoClient('192.168.100.23',25017)
# con_user = pymongo.MongoClient('192.168.100.23',25017)
# con_bangs = pymongo.MongoClient('192.168.100.23',25017)
# con_crawler=pymongo.MongoClient('192.168.100.23',25017)
# con_base=pymongo.MongoClient('192.168.100.23',25017)


con = pymongo.MongoClient('192.168.1.51',27017)
con_city = pymongo.MongoClient('192.168.1.51',27017)
con_user = pymongo.MongoClient('192.168.1.51',27017)
con_bangs = pymongo.MongoClient('192.168.1.51',27017)
con_crawler=pymongo.MongoClient('192.168.1.51',27017)
con_base=pymongo.MongoClient('192.168.1.51',27017)

# con = pymongo.MongoClient('192.168.1.67',27017)
# con_city = pymongo.MongoClient('192.168.1.67',27017)
# con_user = pymongo.MongoClient('192.168.1.67',27017)
# con_bangs = pymongo.MongoClient('192.168.1.67',27017)
# con_crawler = pymongo.MongoClient('192.168.1.67',27017)
# con_base = pymongo.MongoClient('192.168.1.67',27017)




def get_auto_inc():
    '''
    Return the next value of the shared auto-increment counter.

    Atomically increments the ``crawler_dianping_id`` counter document
    in ``base.counters`` (creating it on first use) and returns the new
    counter value; falls back to 1 if no document comes back.
    '''
    doc = con_base.base.counters.find_and_modify(
        {'_id' : 'crawler_dianping_id'},
        {"$inc" : {'counter': 1}},
        upsert=True,
        new=True)
    return doc['counter'] if doc else 1

def get_dianping_config():
    '''
    Return the dianping open-API credentials.

    Gives a dict with keys ``appkey`` and ``secret``; replace the values
    with your own application's credentials.
    '''
    return {
        'appkey': "2890921896",
        'secret': "678bd2128edd485b87db32f21c4b21e3",
    }




def dianping_demo_find_businesses_by_region():
    '''
    Demo for the dianping open API ``find_businesses_by_region``.

    Builds a signed request (appkey + params sorted by key + secret,
    SHA-1, upper-cased hex) for food businesses in Hangzhou's Xiacheng
    district, issues it, and prints the parsed ``businesses`` list.
    '''
    apiUrl = "http://api.dianping.com/v1/business/find_businesses_by_region"
    #http://api.dianping.com/v1/business/find_businesses_by_region?appkey=2890921896&sign=993A7E264C34BAE344C97BB853D2A24F20150FA6&has_coupon=1&limit=20&format=json&platform=2
    # Sample parameters.
    paramSet = []
    
    category='美食'
    city='杭州'
    region='下城区'
    #paramSet.append(("format", "json"))
    paramSet.append(("city", city))
    paramSet.append(("category", category))
    paramSet.append(("region", region))
    paramSet.append(("limit", "40"))
    paramSet.append(("has_coupon", "1"))
    paramSet.append(("platform","2"))
    paramSet.append(("page","2"))
    
    
    # Sort parameters by key and concatenate them for signing.
    paramMap = {}
    for pair in paramSet:
        paramMap[pair[0]] = pair[1]
    
    codec = get_dianping_config().get('appkey')
    for key in sorted(paramMap.iterkeys()):
        codec += key + paramMap[key]
    
    codec += get_dianping_config().get('secret')
    
    # Signature: upper-cased SHA-1 hex over appkey + sorted params + secret.
    sign = (hashlib.sha1(codec).hexdigest()).upper()
    
    # Assemble the request URL.
    url_trail = "appkey=" + get_dianping_config().get('appkey') + "&sign=" + sign
    for pair in paramSet:
        url_trail += "&" + pair[0] + "=" + pair[1]
    
    requestUrl = apiUrl + "?" + url_trail
    
    # Issue the request.
    response = urllib.urlopen(requestUrl)
    
    #print response.read()
    the_page = response.read()
    dic_shzl_list = simplejson.loads(the_page, strict=False).get('businesses','') #parse the JSON response
    print  dic_shzl_list

'''
#得到dianping的商户id号
def get_dianping_id():
    dianping_id = 0
    #http://map.baidu.com/detail?qt=ninf&uid=8ee4560cf91d160e6cc02cd7&detail=cater
    url = 'http://www.dianping.com/search/category/3/10/r58'
    req = urllib2.Request(url,headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1521.3 Safari/537.36'})
    print 'req.url',req.get_full_url()
    response = urllib2.urlopen(req)
    the_page = response.read()
    
#     f = urllib.urlopen(url)
#     the_page = f.read()
        
    
    print 'the_page',the_page
    #<a monitor="review_dianping" href="http://map.baidu.com/detail?qt=ur&amp;url=http%3A%2F%2Fwww.dianping.com%2Fshop%2F3142537%2Freview_all" style="background:url(http://map.baidu.com/fwmap/upload/place/icon/dianping/16.png) no-repeat;" target="_blank" stat="click|{area:comments,kid:dianping}">
#     m_dianping=re.search('<a monitor="review_dianping" href="(.*)" style="',str(the_page))
#     if m_dianping:
#         #print m_dianping.groups()[0]
#         dianping_url= m_dianping.group(1)
#         m_dianping_id = re.search('www.dianping.com%2Fshop%2F(\\d+)%2Freview_all',str(dianping_url))
#         if m_dianping_id:
#             dianping_id = int(m_dianping_id.group(1))
#             print 'dianping_id',dianping_id
#     return dianping_id
'''
        
def dianping_crawler_test001():
    #上城区
    url = 'http://www.dianping.com/search/category/3/10/r58'
    url = 'http://www.dianping.com/search/category/3/10/r1979p24'
    
    values = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1521.3 Safari/537.36','X-Request':'JSON','X-Requested-With':'XMLHttpRequest'}


#     data = urllib.urlencode(values)
#     print data
#     req = urllib2.Request(url, data)
    
    req = urllib2.Request(url, headers=values)
    
    response = urllib2.urlopen(req)
    the_page = response.read()
    print the_page
    shop_ids=[]
    
    m_dianping=re.search('shopIDs:\\s+\\[(.*)\\],',str(the_page))
    if m_dianping:
        shop_ids= m_dianping.group(1)
        print 'shop_ids',shop_ids
        
        
def get_dianping_id(region_url,i):
    url = 'http://www.dianping.com' + str(region_url) + 'p'+str(i)
    
    values = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1521.3 Safari/537.36','X-Request':'JSON','X-Requested-With':'XMLHttpRequest'}


#     data = urllib.urlencode(values)
#     print data
#     req = urllib2.Request(url, data)
    
    req = urllib2.Request(url, headers=values)
    
    response = urllib2.urlopen(req)
    the_page = response.read()
    #print the_page
    shop_ids=[]
    
    m_dianping=re.search('shopIDs:\\s+\\[(.*)\\],',str(the_page))
    if m_dianping:
        shop_ids_tmp= m_dianping.group(1)
        print 'shop_ids_tmp',shop_ids_tmp
        shop_ids = shop_ids_tmp.split(',')
         
        #print 'shop_ids',shop_ids
    return  shop_ids 

#上城区r58   
#http://www.dianping.com/search/ajax/regionlist/category/3/10/r58
def get_category_list_1(district_id):
    #SEARCH_API_URL_LIST = 'http://www.dianping.com/search/ajax/regionlist/category/3/10/r58'
    SEARCH_API_URL_LIST = 'http://www.dianping.com/search/ajax/regionlist/category/3/10/r' + str(district_id)
    req = urllib2.Request(SEARCH_API_URL_LIST,headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1521.3 Safari/537.36'})
    print 'req.url',req.get_full_url()
    response = urllib2.urlopen(req)
    the_page = response.read()
    #print 'the_page',the_page
    dic_category_list = simplejson.loads(the_page, strict=False) #解析json数据格式
    #print 'dic_category_distinct',dic_category_distinct
    return  dic_category_list.get('msg').get('region')

#江干区r61->城东r1671
#http://www.dianping.com/search/ajax/regionlist/category/3/10/r1671?subid=61
def get_category_list_2(district_id,region_id):
    SEARCH_API_URL_LIST = 'http://www.dianping.com/search/ajax/regionlist/category/3/10/r'+str(region_id)+'?subid=' + str(district_id)
    req = urllib2.Request(SEARCH_API_URL_LIST,headers={'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1521.3 Safari/537.36'})
    print 'req.url',req.get_full_url()
    response = urllib2.urlopen(req)
    the_page = response.read()
    #print 'the_page',the_page
    dic_category_list = simplejson.loads(the_page, strict=False) #解析json数据格式
    #print 'dic_category_distinct',dic_category_distinct
    return  dic_category_list.get('msg').get('region')
    



def insert_db_crawler_info():
    '''
    For a fixed list of Hangzhou districts, fetch the level-1 and
    level-2 region lists from dianping and upsert one ``type == 'url'``
    crawl record per region into the ``crawler.dianping`` collection.

    NOTE(review): each record gets a fresh ``_id`` from get_auto_inc(),
    so re-running this inserts new rows rather than updating old ones.
    '''
    district_list = ['上城区','下城区','江干区','萧山区']
    for district in district_list:
        print '---开始处理---',district
        district_id = dic_region.get(district)
        region_info_list_1 = get_category_list_1(district_id)
        if not region_info_list_1:
            continue
        for region_info_1 in region_info_list_1:
            region_id_1 = region_info_1.get('id',0)
            name_1 = region_info_1.get('name','')
            region_shopCount_1 = int(region_info_1.get('shopCount',0))
            region_url_1 = region_info_1.get('url','')
            #print 'district',district,'region_name',region_name,'region_shopCount',region_shopCount,'region_url',region_url
            condition_dic = {}
            condition_dic['_id'] = get_auto_inc()
            update_dic={}
            update_dic['district'] = district
            update_dic['region_id'] = region_id_1
            update_dic['region_name'] = name_1
            update_dic['region_url'] = region_url_1
            update_dic['shop_count'] = region_shopCount_1
            update_dic['lv'] = 2
            update_dic['type']='url'#this record stores a crawl URL
            try:
                con_crawler.crawler.dianping.update(condition_dic, {'$set':update_dic},upsert=True,multi=False)        
            except:
                print 'errors---',traceback.print_exc()
            # Descend one level: sub-regions of this level-1 region.
            region_info_list_2 = get_category_list_2(district_id,region_id_1)
            
            if not region_info_list_2:
                continue
            for region_info_2 in region_info_list_2:
                region_id_2 = region_info_2.get('id',0)
                name_2 = region_info_2.get('name','')
                region_url_2 = region_info_2.get('url','')
                region_shopCount_2 = int(region_info_2.get('shopCount',0))
                print 'district',district,'l1-l2',name_1,name_2,'region_url_2',region_url_2,'region_shopCount_2',region_shopCount_2
                
                condition_dic = {}
                condition_dic['_id'] = get_auto_inc()
                update_dic={}
                update_dic['district'] = district
                update_dic['region_id'] = region_id_2
                update_dic['father_id'] = region_id_1#parent (level-1) region id
                update_dic['region_name'] = name_2
                update_dic['region_url'] = region_url_2
                update_dic['shop_count'] = region_shopCount_2
                update_dic['lv'] = 3
                update_dic['type']='url'#this record stores a crawl URL
                try:
                    con_crawler.crawler.dianping.update(condition_dic, {'$set':update_dic},upsert=True,multi=False)        
                except:
                    print 'errors---',traceback.print_exc()
                
def insert_db_dianping_id_info():
    condition = {}
    #condition['_id'] = 53
    condition['type']='url'
    dianping_crawler_urls = con_crawler.crawler.dianping.find (condition)
    print dianping_crawler_urls.count()
    for dianping_crawler_url in dianping_crawler_urls:
        region_url =  dianping_crawler_url.get('region_url')
        shop_count = int(dianping_crawler_url.get('shop_count',0))
        page_count =1
        page_count,tail=divmod(shop_count,15)#获取总分页数
        if tail is not 0:
            page_count+=1
        print 'total:',page_count,'page_count',page_count
        print 'region_url',region_url
        for i in range(1,page_count+1):
            dianping_ids =  get_dianping_id(region_url,i)
            
            try:
            
                for dianping_id in dianping_ids:
                    shop_id = int(dianping_id)
                    update_dic = {}
                    count = con_crawler.crawler.dianping.find({'shop_id':shop_id}).count()
                    print 'count',count
                    if count>0:
                        continue
        #                 condition_dic = {}
        #                 condition_dic['shop_id'] = shop_id
        #                 try:
        #                     con_city.city.shzl.update(condition_dic, {'$set':update_dic},upsert=True,multi=False)        
        #                 except:
        #                     print 'errors---',traceback.print_exc()
                    else:
                        condition_dic = {}
                        condition_dic['_id'] = get_auto_inc()
                        update_dic['shop_id'] = shop_id
                        update_dic['region_url'] = region_url
                        update_dic['type'] = 'dianping_id'
                        print 'sss'
                        try:
                            con_crawler.crawler.dianping.update(condition_dic, {'$set':update_dic},upsert=True,multi=False)        
                        except:
                            print 'errors---',traceback.print_exc()
            except:
                print 'errors---',traceback.print_exc()

# Fetch one business's detail record from dianping by its business id.
def dianping_demo_find_businesses_by_id(business_id):
    '''
    Call the dianping open API ``get_single_business`` for one shop.

    business_id -- the dianping shop id (any value convertible by str()).
    Returns the parsed ``businesses`` list from the JSON response (also
    printed as a side effect), or '' when the key is missing.
    '''
    # Credentials come from get_dianping_config(); replace them there.
#     appkey = "2890921896"
#     secret = "678bd2128edd485b87db32f21c4b21e3"
    
    appkey = get_dianping_config().get('appkey')
    secret = get_dianping_config().get('secret')
    
    apiUrl = "http://api.dianping.com/v1/business/get_single_business"
    #http://api.dianping.com/v1/business/get_single_business?out_offset_type=1&platform=2
    # Request parameters.
    paramSet = []
    
    paramSet.append(("format", "json"))
    paramSet.append(("business_id", str(business_id)))
    paramSet.append(("out_offset_type", "1"))
    paramSet.append(("platform","2"))
    
    # Sort parameters by key and concatenate them for signing.
    paramMap = {}
    for pair in paramSet:
        paramMap[pair[0]] = pair[1]
    
    codec = appkey
    for key in sorted(paramMap.iterkeys()):
        codec += key + paramMap[key]
    
    codec += secret
    
    # Signature: upper-cased SHA-1 hex over appkey + sorted params + secret.
    sign = (hashlib.sha1(codec).hexdigest()).upper()
    
    # Assemble the request URL.
    url_trail = "appkey=" + appkey + "&sign=" + sign
    for pair in paramSet:
        url_trail += "&" + pair[0] + "=" + pair[1]
    
    requestUrl = apiUrl + "?" + url_trail
    
    # Issue the request.
    response = urllib.urlopen(requestUrl)
    
    #print response.read()
    the_page = response.read()
    dic_shzl_list = simplejson.loads(the_page, strict=False).get('businesses','') #parse the JSON response
    print  dic_shzl_list
    return dic_shzl_list

# Enrich stored dianping-id records with detail info from the API.
def update_db_dianping_info():
    '''
    For every ``type == 'dianping_id'`` row that has a ``shop_id``,
    fetch the shop's detail from the dianping API and merge selected
    fields back into the same document, marking it ``type == 'info'``.
    '''
    condition_dic = {}
    condition_dic['shop_id'] = {'$exists':True}
    condition_dic['type'] = 'dianping_id'
    #condition_dic['_id'] =101
    #condition_dic['shop_id']=10486199
    
    dianpings = con_crawler.crawler.dianping.find (condition_dic)
    print dianpings.count()
    
    
    
    for dianping in dianpings:
        dianping_id = int(dianping.get('shop_id',0))
        print '---dianping_id---',dianping_id
        id =  int(dianping.get('_id',0))
        if dianping_id:
            dianping_businesses = dianping_demo_find_businesses_by_id(dianping_id)
            time.sleep(2)  # pause between API calls
            if not dianping_businesses:
                continue
            try:
                dianping_business = dianping_businesses[0]
                business_id =  dianping_business.get('business_id',0)
                name = dianping_business.get('name','')
                branch_name = dianping_business.get('branch_name','')
                address= dianping_business.get('address','')
                telephone= dianping_business.get('telephone','')
                city= dianping_business.get('city','')
                # NOTE(review): raises IndexError when 'regions' is empty;
                # that is swallowed by the except below.
                region = dianping_business.get('regions',[])[0]
                latitude=dianping_business.get('latitude','')
                longitude =dianping_business.get('longitude','')
                avg_price =dianping_business.get('avg_price','')
                s_photo_url = dianping_business.get('s_photo_url','')
                location = {    "lat" : latitude,"lng" : longitude}
        
                update_dic = {}
                update_dic['shmc'] = name
                update_dic['shdz'] = address
                update_dic['lxdh'] = telephone
                update_dic['avg_price'] = avg_price
                #update_dic['uid'] = uid
                #update_dic['city_code'] = 'hangzhou'
                update_dic['district'] = region
                #update_dic['location'] = location
                update_dic['time'] = int(time.time())
                update_dic['editor'] = '小山药'
                update_dic['status'] = 1
                update_dic['status_market'] = 4#record was filled via the API
                update_dic['type'] = 'info'
                #update_dic['info_type'] =1#
                update_dic['info_from'] ='www.dianping.com'
                #update_dic['business_id'] = business_id
                update_dic['s_photo_url'] = s_photo_url
                update_dic['dianping_detail'] = dianping_business
                
                condition_dic = {}
                condition_dic['_id'] = id
            
                con_crawler.crawler.dianping.update(condition_dic, {'$set':update_dic},upsert=False,multi=False)        
            except:
                print 'errors---',traceback.print_exc()

def subString(string,length):
    '''
    Truncate a UTF-8 byte string to at most ``length`` bytes without
    splitting a multi-byte character.

    string -- a (Python 2) byte string; indices are byte offsets.
    length -- maximum number of bytes to keep.

    The UTF-8 leading byte encodes the character's byte width
    (0xxxxxxx = 1, 110xxxxx = 2, 1110xxxx = 3, ...), so we advance a
    byte cursor one whole character at a time and stop before the first
    character that would overflow ``length``.

    FIX: the original broke out *before* accepting a character that
    ended exactly at ``length``, so e.g. subString('abcdef', 3) gave
    'ab' instead of 'abc'.  Also removed the unused ``result`` local.
    '''
    if length >= len(string):
        return string
    end = 0  # byte index just past the last character that fits
    while end < len(string):
        ch = ord(string[end])
        #1111110x -> 6-byte character
        if ch >= 252:
            width = 6
        #111110xx -> 5 bytes
        elif ch >= 248:
            width = 5
        #11110xxx -> 4 bytes
        elif ch >= 240:
            width = 4
        #1110xxxx -> 3 bytes
        elif ch >= 224:
            width = 3
        #110xxxxx -> 2 bytes
        elif ch >= 192:
            width = 2
        #0xxxxxxx -> single byte
        else:
            width = 1
        if end + width > length:
            # This character would exceed the limit; stop before it.
            break
        end += width
    return string[0:end]



# Insert data rows into the Baidu LBS cloud-storage poi table.
def create(url):
    '''
    Read every document from ``crawler.district`` and POST each one as a
    poi row to the Baidu LBS cloud-storage create endpoint, printing the
    encoded payload and the raw response for each row.

    url -- the poi/create endpoint URL.
    '''
#         Parameter reference (Baidu LBS poi/create):
#         title        poi name                  string(256)  optional
#         address      address                   string(256)  optional
#         tags         tags                      string(256)  optional
#         latitude     uploaded latitude         double       required
#         longitude    uploaded longitude        double       required
#         coord_type   coordinate type           uint32       1: raw GPS  2: GCJ-02 (national bureau)  3: Baidu encrypted -- required
#         geotable_id  id of the owning geotable string(50)   required (encrypted id)
#         ak           user access key           string(50)   required
#         sn           permission signature      string(50)   optional
#         {column key} user-defined column key/value pairs (string/int/double)

    condition_dic = {}
    #condition_dic['_id']=12887
    districts = con_crawler.crawler.district.find (condition_dic)
    print districts.count()
    
    
    
    for district in districts:
       
        values = {}
        values['title']  = district.get('district_name','')
        values['address']  = district.get('address','')
        values['latitude']  = district.get('latitude','')
        values['longitude']  = district.get('longitude','')
        
        values['average_price']  = district.get('average_price','')
        values['city_title']  = district.get('city','')
        values['city_code']  = 'shanghai'
        values['developer']  = district.get('developer','')
        # Cap the introduction at 200 bytes (UTF-8-safe truncation).
        values['introduction']  = subString(district.get('introduction',''),200)
        values['property_expense']  = district.get('property_expense','')
        values['province_title']  = district.get('province','')
        values['url']  = district.get('url','')
        #values['image_list']  = 'http://pic1.ajkimg.com/m/69e1a3dca34c7f2bde1b0226d0c6c2c4/420x420.jpg$$http://pic1.ajkimg.com/m/54258c304d52b91a1319d24fa7a6bf9b/420x420.jpg$$http://pic1.ajkimg.com/m/bfd91641bbe5c5d6000207aacb656de8/420x420.jpg$$http://pic1.ajkimg.com/m/03205fc1407ede4f3dcb195bed5e291a/420x420.jpg$$http://pic1.ajkimg.com/m/980508dd6781ef9480f329a570fb1c7e/420x420.jpg,http://include.aifcdn.com/touchweb/2013_45_08_1/touch/img/large_img_default.gif'
        # Keep at most the first two '$$'-separated image URLs.
        str_split = district.get('image_list','').split('$$')
        if len(str_split)>=2:
            values['image_list']  = str_split[0]+'$$'+ str_split[1]
        else:
            values['image_list']  = str_split[0]
        '''
        values['title']  = '象屿郦庭'
        values['address']  = '一二八纪念东路新二路'
        #values['tags']  = '球馆'
        values['latitude']  = '31.33'
        values['longitude']  = '121.48'
        
        values['average_price']  = '28044'
        values['city_title']  = '上海'
        values['city_code']  = 'shanghai'
        values['developer']  = '上海象屿房地产开发有限公司'
        #values['introduction']  = '象屿郦庭为小型精品社区，由9幢经典ART DECO风格小高层组成，自带社区商业。户型注重全功能高附加空间原则，是淞南高境板块少有的高品质精装修项目。项目所在位置配套成熟，交通便利；西侧紧靠教育配套成熟的新江湾城板块。。象屿郦庭占地4.2万方，规模适中，属于精品的小型社区，便于管理；本身自带配套有菜市场、商业街与健身会所，极大提高了业主的日常生活与休闲娱乐；50%的绿化率，使小区景观优美，环境更舒适。。象屿郦庭的住宅设计体现舒适性，注重户型全功能高附加空间，做到明厅、明卧、明卫、明厨、明餐厅的全明设计。入户花园计算一半面积，花池全送，所有的飘窗隔断墙交房时均会打掉，与地面同一水平，边套户型更赠送270度的转角落地飘窗，增加使用空间，但不计入建筑面积，总的赠送面积近10平米，使其空间利用率更高。户型多为小面积定位，2房约74-90平米、3房110—130平米，大部分是一梯二户的板式楼，在各功能区舒适度保持的前提下有效的控制了面积，大幅降低了购房成本与支出。。象屿郦庭位于宝山高境，高境镇是宝山区房地产发展中较受关注的区域之一。该区域距中心城区近，西有共和新路高架和地铁1号线延伸线，东有逸仙路'
        values['introduction']  = '象屿郦庭为小型精品社区，由9幢经典ART DECO风格小高层组成，自带社区商业。户型注重全功能高附加空间原则...'
        values['property_expense']  = '2.1 元/平米·月'
        values['province_title']  = '上海'
        values['url']  = 'http://m.anjuke.com/sh/community/370229/'
        #values['image_list']  = 'http://pic1.ajkimg.com/m/69e1a3dca34c7f2bde1b0226d0c6c2c4/420x420.jpg$$http://pic1.ajkimg.com/m/54258c304d52b91a1319d24fa7a6bf9b/420x420.jpg$$http://pic1.ajkimg.com/m/bfd91641bbe5c5d6000207aacb656de8/420x420.jpg$$http://pic1.ajkimg.com/m/03205fc1407ede4f3dcb195bed5e291a/420x420.jpg$$http://pic1.ajkimg.com/m/980508dd6781ef9480f329a570fb1c7e/420x420.jpg,http://include.aifcdn.com/touchweb/2013_45_08_1/touch/img/large_img_default.gif'
        values['image_list']  = 'http://pic1.ajkimg.com/m/69e1a3dca34c7f2bde1b0226d0c6c2c4/420x420.jpg$$http://pic1.ajkimg.com/m/54258c304d52b91a1319d24fa7a6bf9b/420x420.jpg'
        '''
        
        
        values['coord_type']  = '1'
        values['geotable_id']  = '41830'#hard-coded target geotable
        values['ak']  = 'B37c30b0997565ed9ff385c3f7b912c1'
        data = urllib.urlencode(values)
        print data
        req = urllib2.Request(url, data)
        response = urllib2.urlopen(req)
        the_page = response.read()
        print the_page       


#创建字段接口
'''
http://api.map.baidu.com/geodata/v2/column/create    // POST请求
2.6.2 请求参数

参数名    参数含义    类型    备注
name    column的属性中文名称    string(45)    必选
key    column存储的属性key    string(45)    必选，同一个geotable内的名字不能相同
type    存储的值的类型    uint32    必选，枚举值1： Int64, 2:double, 3,string
max_length    最大长度    uint32    最大值2048，最小值为1，针对string有效，并且string时必填。此值代表utf8的汉字个数，不是字节个数
default_value    默认值    string(45)    设置默认值
is_sortfilter_field    是否检索引擎的数值排序筛选字段    uint32    必选
1,代表支持，0为不支持。只有int或者double可以设置
is_search_field    是否检索引擎的文本检索字段    uint32    必选
1,代表支持，0为不支持。只有string可以设置检索字段只能用于字符串类型的列且最大长度不能超过512个字节
is_index_field    是否存储引擎的索引字段    uint32    必选
用于存储接口查询:1,代表支持，0为不支持。
geotable_id    所属于的geotable_id    string(50)     
ak    用户的访问权限key    string(50)    必选。
sn    用户的权限签名    string(50)    可选。
'''
#创建字段接口
def column_create(url):
    #生成一个可以查询的字段
    #values = {'name':'test','key':'test','geotable_id':41830,'type':3,'max_length':512,'is_sortfilter_field':0,'is_search_field':1,'is_index_field':1,'ak':'B37c30b0997565ed9ff385c3f7b912c1'}
    #生成一个可以过滤的字段
    values = {'name':'test','key':'test_price','geotable_id':41830,'type':2,'is_sortfilter_field':1,'is_index_field':1,'ak':'B37c30b0997565ed9ff385c3f7b912c1'}
    data = urllib.urlencode(values)
    print data
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    the_page = response.read()
    print the_page



#创建表接口                
def geotable_create(url):  

    values = {'name':'小区信息表_test','geotype':'1',
                                 'is_published':'1','is_published':'1'
                                 ,'ak':'B37c30b0997565ed9ff385c3f7b912c1',}
    data = urllib.urlencode(values)
    print data
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    the_page = response.read()
    print the_page
 
# Driver: create the info geotable.
def test001():
    '''Create the community-info table (see geotable_create).'''
    geotable_create('http://api.map.baidu.com/geodata/v2/geotable/create')
    #return True
 
# Driver: push district rows to the poi table.
def test002():
    '''Insert district documents via the poi/create endpoint (see create).'''
    create('http://api.map.baidu.com/geodata/v2/poi/create')
   
# Driver: add a column field.
def test003():
    '''Create the extra column on the geotable (see column_create).'''
    column_create('http://api.map.baidu.com/geodata/v2/column/create')
   
    
if __name__ == '__main__':
    # Entry point: currently only runs the column-creation driver; the
    # other drivers and scratch snippets are kept commented for manual use.
    #pass
    #test001()
    #test002()
    test003()
    #print subString('',2)
#     str = '111$$2222$$333'
#     #str =''
#     str_split = str.split('$$')
#     print len(str_split),str_split[0],str_split[1]

