# coding:utf8
import StringIO
import functools
import hashlib
import json
import re
import sys
import time
import urllib

import pycurl
import requests

path = sys.path[0]
reload(sys)
sys.setdefaultencoding('utf8')


def try_number(num):
    """Decorator factory: retry the wrapped callable up to ``num`` times.

    Each raised exception is printed along with the attempt counter, then
    the call is retried.  The first successful call's return value is
    returned; if all ``num`` attempts raise, the failure is swallowed and
    ``None`` is returned (preserving the original best-effort behaviour).

    Fixes: ``num <= 0`` no longer loops forever (the function is simply
    never called); the leftover ``print 'hello'`` debug line is removed;
    ``functools.wraps`` preserves the wrapped function's metadata; uses
    ``except ... as`` syntax (valid on Python 2.6+ and Python 3).
    """
    def _func(func):
        @functools.wraps(func)
        def __func(*args, **kwargs):
            for attempt in range(1, num + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    # Report the failure and which attempt this was.
                    print(e)
                    print('*' * 100)
                    print(attempt)
            # All attempts failed: give up silently (returns None).

        return __func

    return _func


def try_pass(func):
    """Decorator: call *func*, swallowing (and printing) any exception.

    On success the wrapped function's return value is passed through; on
    any exception the error is printed and ``None`` is returned.

    Fixes: ``functools.wraps`` keeps *func*'s name/docstring on the
    wrapper; ``except ... as`` syntax replaces the Python-2-only
    ``except Exception, e`` form (valid on Python 2.6+ and Python 3).
    """
    @functools.wraps(func)
    def _func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # Best-effort semantics: report the error, fall through to None.
            print(e)

    return _func


import requests


def get_proxy():
    """Yield candidate proxy URLs ('http://<ip:port>') from iphai.com.

    Performs one network request to the daili.iphai.com "getapi" endpoint
    (the ``ddh`` and cookie values are account-specific) and yields one
    proxy URL per non-blank response line.

    Fixes: removed the unused ``use_ip`` list and the dead per-iteration
    ``headers`` rebinding; blank lines (e.g. a trailing newline in the
    response) no longer yield a bogus bare 'http://' entry.
    """
    headers = {"Host": "daili.iphai.com", "Connection": 'keep-alive',
               'Cookie': '__cfduid=dd560303a8760ab99e2e8e1382ad59b0b1447120544; Hm_lvt_1528f7f4830b519951a59e6a1656f499=1447121836; Hm_lpvt_1528f7f4830b519951a59e6a1656f499=1447121922'}
    url = 'http://daili.iphai.com/getapi.ashx?ddh=2858487913&num=15&area=%E5%8C%97%E4%BA%AC&yys=2&am=0&mt=0&fm=text'
    ips = requests.get(url, headers=headers).content
    ips = ips.split('\n')
    print(ips)
    for line in ips:
        line = line.rstrip()
        if not line:
            continue  # skip empty lines instead of yielding 'http://'
        yield 'http://' + line


# def get_proxy():
#     use_ip = []
#     headers={"Host":"daili.iphai.com","Connection":'keep-alive','Cookie':'__cfduid=dd560303a8760ab99e2e8e1382ad59b0b1447120544; Hm_lvt_1528f7f4830b519951a59e6a1656f499=1447121836; Hm_lpvt_1528f7f4830b519951a59e6a1656f499=1447121922'}
#     url='http://daili.iphai.com/getapi.ashx?ddh=2858487913&num=10&port=8088&yys=2&am=0&mt=0&fm=text'
#     ips=requests.get(url,headers=headers).content
#     ips = ips.split('\n')
#     print ips
#     for i in ips:
#         proxy='http://'+i.rstrip()
#         headers={"Host":"www.baidu.com"}
#         try:
#             c = pycurl.Curl()
#             c.setopt(c.URL, "http://www.baidu.com")
#             c.setopt(c.PROXY,proxy)
#             c.fp = StringIO.StringIO()
#             c.setopt(pycurl.CONNECTTIMEOUT,8)
#             c.setopt(c.WRITEFUNCTION, c.fp.write)
#             c.perform()
#             html = c.fp.getvalue()

#             #print html
#             return proxy
#         except requests.exceptions.ConnectionError:
#             print proxy
#             continue
#         except requests.exceptions.ReadTimeout:
#             print proxy
#             continue
#         except:
#             print proxy
#             continue
#     return use_ip

def wordlist(name, proxy=''):  # keyword id lookup (translated from: 关键词id)
    """Fetch the 'advanceSearch' result for domain keyword *name*.

    Hits the CamCard enterprise API, optionally tunnelling through an
    HTTP *proxy*, and returns the raw response body as a string.
    """
    url = 'https://api.intsig.net/user/CCAppService/enterprise/advanceSearch?device_id=d2d8cf641c5d60ae&domain=%s&start=0&token=3B079AD747694C41NS9H08QL' % urllib.quote(str(name))
    print(url)
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    settings = [
        (pycurl.VERBOSE, 1),
        (pycurl.FOLLOWLOCATION, 1),
        (pycurl.MAXREDIRS, 5),
        (pycurl.CONNECTTIMEOUT, 60),
        (pycurl.TIMEOUT, 300),
        (pycurl.HTTPPROXYTUNNEL, 1),
        (pycurl.HTTPHEADER, ['Proxy-Connection:keep-alive',
                             'User-Agent:CamCard/6.5.1.9544 CFNetwork/758.0.2 Darwin/15.0.0',
                             'Host:api.intsig.net']),
        (pycurl.URL, url),
        (pycurl.WRITEFUNCTION, buf.write),
    ]
    if proxy:
        settings.append((pycurl.PROXY, proxy))
    for option, value in settings:
        curl.setopt(option, value)
    curl.perform()
    return buf.getvalue()


def de_lu():
    """Fetch the 'getHotDomain' endpoint and return the raw response body.

    index() calls this once before the per-company requests -- presumably
    to warm up / register the session on the API side (NOTE(review):
    exact purpose unconfirmed from this file).
    """
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    for option, value in [
        (pycurl.FOLLOWLOCATION, 1),
        (pycurl.MAXREDIRS, 5),
        (pycurl.REFERER, 'https://p2p.edai.com/'),
        (pycurl.CONNECTTIMEOUT, 60),
        (pycurl.TIMEOUT, 300),
        (pycurl.HTTPPROXYTUNNEL, 1),
        (pycurl.HTTPHEADER, ['User-Agent:CamCard/6.5.1.9544 CFNetwork/758.0.2 Darwin/15.0.0', 'Host:api.intsig.net']),
        (pycurl.URL,
         'https://api.intsig.net/user/CCAppService/enterprise/getHotDomain?device_id=d2d8cf641c5d60ae&token=A94F0A295F424D80yC8h7AEL'),
        (pycurl.WRITEFUNCTION, buf.write),
    ]:
        curl.setopt(option, value)
    curl.perform()
    return buf.getvalue()


def de_lu1(id):
    """Fetch the mobile company page for company *id* and return its HTML.

    index() scrapes the hidden ``hidden_code`` input value out of the
    returned page.
    """
    page_url = 'http://info.camcard.com/?id=%s&device_id=d2d8cf641c5d60ae&code=9d7dd58b47a494f80ede8675a1caf19e528f03ebb7abbd66eb575b4af875899a&from=searchList' % id
    request_headers = [
        'User-Agent:Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 CamCard_IP_ZHLITE/6.5.1.9544',
        'Host:info.camcard.com', 'Cookie:_cpl=en-us']
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    curl.setopt(pycurl.MAXREDIRS, 5)
    curl.setopt(pycurl.CONNECTTIMEOUT, 60)
    curl.setopt(pycurl.TIMEOUT, 300)
    curl.setopt(pycurl.HTTPPROXYTUNNEL, 1)
    curl.setopt(pycurl.HTTPHEADER, request_headers)
    curl.setopt(pycurl.URL, page_url)
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)
    curl.perform()
    return buf.getvalue()


def detail(id, code, token, proxy=''):
    """Fetch the enterprise 'getDetail' JSONP payload for company *id*.

    *code* is the hidden value scraped from the mobile page and *token*
    the md5-derived API token.  When *proxy* is non-empty the request is
    tunnelled through it.  Returns the raw body (expected ``jsonp4(...)``).
    """
    request_headers = [
        'Accept:*/*',
        'User-Agent:Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 CamCard_IP_ZHLITE/6.5.1.9544',
        'Host:api.intsig.net', 'Referer:http://info.camcard.com/']
    url = 'http://api.intsig.net/user/CCAppService/enterprise/getDetail?id=%s&token=%s&code=%s&tip=%s&platform=WX&from=camcard&client_app=web&_=%s&callback=jsonp4' % (
        id, token, code, code, int(time.time() * 1000))
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    settings = [
        (pycurl.VERBOSE, 1),
        (pycurl.FOLLOWLOCATION, 1),
        (pycurl.MAXREDIRS, 5),
        (pycurl.CONNECTTIMEOUT, 60),
        (pycurl.TIMEOUT, 300),
        (pycurl.HTTPPROXYTUNNEL, 1),
        (pycurl.HTTPHEADER, request_headers),
        (pycurl.URL, url),
        (pycurl.WRITEFUNCTION, buf.write),
    ]
    if proxy:
        settings.insert(0, (pycurl.PROXY, proxy))
    for option, value in settings:
        curl.setopt(option, value)
    curl.perform()
    return buf.getvalue()


def job(id, code, token):
    """Fetch the 'getJobsPaging' JSONP payload (job postings) for company *id*.

    *code*/*token* are the same credentials used by detail().  Returns the
    raw response body.

    NOTE(review): the Host header below includes the scheme and a trailing
    slash ('Host:https://api.intsig.net/'), which is not a well-formed Host
    value -- confirm whether the endpoint actually tolerates/needs this.
    """
    head = [
        'User-Agent:Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 CamCard_IP_ZHLITE/6.5.1.9544',
        'Host:https://api.intsig.net/']
    url = 'http://api.intsig.net/user/CCAppService/enterprise/getJobsPaging?id=%s&token=%s&from=camcard&code=%s&platform=WX&client_app=web&start=0&callback=jsonp4' % (
    id, token, code)
    c = pycurl.Curl()
    # c.setopt(pycurl.VERBOSE,1)
    c.setopt(pycurl.FOLLOWLOCATION, 1)
    c.setopt(pycurl.MAXREDIRS, 5)

    c.setopt(pycurl.CONNECTTIMEOUT, 60)
    c.setopt(pycurl.TIMEOUT, 300)

    c.setopt(pycurl.HTTPPROXYTUNNEL, 1)
    # Collect the response body in an in-memory buffer.
    c.fp = StringIO.StringIO()
    c.setopt(pycurl.HTTPHEADER, head)
    c.setopt(pycurl.URL, url)
    c.setopt(c.WRITEFUNCTION, c.fp.write)
    c.perform()
    return c.fp.getvalue()


def related(id, code, token):  # related companies (translated from: 关联企业)
    """Fetch the 'searchRelatedET' payload (related companies).

    NOTE(review): the *id* parameter is never used -- the URL interpolates
    the module-level global ``name`` (set by index()), so calling this
    before index() raises NameError.  Presumably the endpoint wants the
    company *name*; confirm and either use the parameter or rename it.

    NOTE(review): the Host header includes the scheme
    ('Host:https://api.intsig.net'), which is not a well-formed Host value.
    """
    head = [
        'User-Agent:Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 CamCard_IP_ZHLITE/6.5.1.9544',
        'Host:https://api.intsig.net']
    url = 'http://api.intsig.net/user/CCAppService/enterprise/searchRelatedET?name=%s&token=%s&from=camcard&code=%s&platform=WX' % (
    urllib.quote(str(name)), token, code)
    c = pycurl.Curl()
    c.setopt(pycurl.VERBOSE, 1)
    c.setopt(pycurl.FOLLOWLOCATION, 1)
    c.setopt(pycurl.MAXREDIRS, 5)

    c.setopt(pycurl.CONNECTTIMEOUT, 60)
    c.setopt(pycurl.TIMEOUT, 300)

    c.setopt(pycurl.HTTPPROXYTUNNEL, 1)
    # Collect the response body in an in-memory buffer.
    c.fp = StringIO.StringIO()
    c.setopt(pycurl.HTTPHEADER, head)
    c.setopt(pycurl.URL, url)
    c.setopt(c.WRITEFUNCTION, c.fp.write)
    c.perform()
    return c.fp.getvalue()


def news(name):
    """Fetch the company news feed for *name* from info.camcard.com.

    Returns the raw response body (the endpoint serves JSON).
    """
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    for option, value in [
        (pycurl.VERBOSE, 1),
        (pycurl.FOLLOWLOCATION, 1),
        (pycurl.MAXREDIRS, 5),
        (pycurl.CONNECTTIMEOUT, 60),
        (pycurl.TIMEOUT, 300),
        (pycurl.HTTPPROXYTUNNEL, 1),
        (pycurl.HTTPHEADER, [
            'User-Agent:Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 CamCard_IP_ZHLITE/6.5.1.9544',
            'Host:info.camcard.com']),
        (pycurl.URL, 'http://info.camcard.com/api/news?l=zh-cn&s=%s' % name),
        (pycurl.WRITEFUNCTION, buf.write),
    ]:
        curl.setopt(option, value)
    curl.perform()
    return buf.getvalue()


def getLaws(id, code, token):  # court judgments (translated from: 法案判决)
    """Fetch the lawsuit list for *id* from the CamCard lawsuit API.

    Despite the parameter name, the quoted value is sent as the ``name``
    query parameter (a commented-out call site in index() passes the
    company name).  Returns the raw response body.
    """
    url = 'http://api.intsig.net/user/CCAppService/lawsuit/getLaws?name=%s&token=%s&from=camcard&code=%s&platform=WX&client_app=web&start=0' % (
        urllib.quote(id), token, code)
    print(url)
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    curl.setopt(pycurl.VERBOSE, 1)
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    curl.setopt(pycurl.MAXREDIRS, 5)
    curl.setopt(pycurl.CONNECTTIMEOUT, 60)
    curl.setopt(pycurl.TIMEOUT, 300)
    curl.setopt(pycurl.HTTPPROXYTUNNEL, 1)
    curl.setopt(pycurl.HTTPHEADER, [
        'User-Agent:Mozilla/5.0 (iPhone; CPU iPhone OS 9_0 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13A344 CamCard_IP_ZHLITE/6.5.1.9544',
        'Host:info.camcard.com'])
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)
    curl.perform()
    return buf.getvalue()


@try_pass
def get_company_list(name, proxy=''):
    """Query the enterprise auto-complete endpoint for *name*.

    Returns the parsed JSON response (a dict) on success, or the raw body
    when the server answers with a 403/400 block page so callers can detect
    throttling and retry through a proxy.  Any exception is swallowed by
    @try_pass, which then yields None.
    """
    url = 'https://api.intsig.net/user/CCAppService/enterprise/getAutoComplete?device_id=d2d8cf641c5d60ae&keyword=%s&token=3B079AD747694C41NS9H08QL' % urllib.quote(str(name))
    print(url)
    buf = StringIO.StringIO()
    curl = pycurl.Curl()
    settings = [
        (pycurl.VERBOSE, 1),
        (pycurl.FOLLOWLOCATION, 1),
        (pycurl.MAXREDIRS, 5),
        (pycurl.CONNECTTIMEOUT, 10),
        (pycurl.TIMEOUT, 300),
        (pycurl.HTTPPROXYTUNNEL, 1),
        (pycurl.HTTPHEADER, ['User-Agent:CamCard/6.5.1.9544 CFNetwork/758.0.2 Darwin/15.0.0', 'Host:api.intsig.net']),
        (pycurl.URL, url),
        (pycurl.WRITEFUNCTION, buf.write),
    ]
    if proxy:
        settings.append((pycurl.PROXY, proxy))
    for option, value in settings:
        curl.setopt(option, value)
    curl.perform()
    body = buf.getvalue()
    print(body)
    if '<title>403 Forbidden</title>' in body or '<title>400 Bad Request</title>' in body:
        return body
    else:
        return json.loads(body)


@try_number(10)
def company_list(name):  # fetch the company list (translated from: 获取公司列表)
    """Return the auto-complete result for *name*, rotating proxies on block.

    Delegates to get_company_list(); when the direct request comes back as
    a 403/400 block page, each proxy from get_proxy() is tried until one
    returns a usable (truthy, non-blocked) result.  Returns None when every
    proxy fails; a None result from get_company_list still raises TypeError
    in the membership test, which @try_number(10) catches and retries.

    Fix: removed the unreachable ``break`` that followed ``return`` and
    flattened the redundant continue/else nesting.
    """
    blocked_markers = ('<title>403 Forbidden</title>', '<title>400 Bad Request</title>')
    result = get_company_list(name)
    if not any(marker in result for marker in blocked_markers):
        return result
    for proxy in get_proxy():
        result = get_company_list(name, proxy)
        if result and not any(marker in result for marker in blocked_markers):
            return result
    # Every proxy was blocked or failed: give up (returns None).


# @try_number(10)
def index(sname):
    """Scrape the full CamCard profile for company name *sname*, return a dict.

    Pipeline:
      1. auto-complete search for the name, rotating proxies from
         get_proxy() on 403/400 block pages, on the '操作受限'
         ("operation restricted") message, or on 'Invalid request';
      2. take the first hit's company id;
      3. fetch the mobile page and scrape the hidden ``code`` field;
      4. derive ``token`` = md5(company id + fixed security key);
      5. fetch the detail JSONP payload and flatten it into one large
         JSON string, evaluated into a dict at the end.

    NOTE(review): the final ``eval(allstr)`` evaluates text built from
    scraped remote data -- a code-injection risk; json.loads would be safer.
    Heavy debug printing throughout.
    """
    # The module-level global `name` is also read by related().
    global name
    name = sname
    print type(name)
    print name
    # companyid = company_list(name)
    companyid = get_company_list(name)
    print companyid
    # `proxy_ip` remembers the proxy that finally worked, so detail() can
    # reuse it further down.
    proxy_ip = ''

    # Blocked by the server: retry the auto-complete through each proxy
    # until one response is neither a 403 nor a 400 page.
    if '<title>403 Forbidden</title>' in companyid or '<title>400 Bad Request</title>' in companyid:
        print 'hello'
        for proxy in get_proxy():
            print '*' * 100
            proxy_ip = proxy
            print '1' * 100
            print proxy_ip
            companyid = get_company_list(name, proxy)
            if companyid:
                if '<title>403 Forbidden</title>' in companyid or '<title>400 Bad Request</title>' in companyid:
                    continue
                else:
                    break
            else:
                continue
    # '操作受限' means "operation restricted" (rate-limited): rotate proxies.
    # NOTE(review): if companyid is still an HTML string here, or the dict
    # has no 'message' key, this subscript raises -- confirm intended.
    if companyid['message'] == '操作受限':
        print 'hello'
        for proxy in get_proxy():
            print '2' * 100
            proxy_ip = proxy
            companyid = get_company_list(name, proxy)
            if companyid:
                if '<title>403 Forbidden</title>' in companyid or '<title>400 Bad Request</title>' in companyid:
                    continue
                else:
                    break
            else:
                continue
    print companyid
    print 'sccess'
    # companyid = json.loads(companyid)
    # Reduce the search payload to the first match's company id; on any
    # failure (bare except) retry the whole lookup through proxies.
    try:
        companyid = companyid['data']['items'][0]['id']
    except:
        for proxy in get_proxy():
            print '2' * 100
            proxy_ip = proxy
            companyid = get_company_list(name, proxy)
            companyid = companyid['data']['items'][0]['id']
            # NOTE(review): the blocked-page check below runs on the id
            # string, after the subscripting already succeeded -- likely
            # dead code.
            if companyid:
                if '<title>403 Forbidden</title>' in companyid or '<title>400 Bad Request</title>' in companyid:
                    continue
                else:
                    break
            else:
                continue

    print companyid
    de_lu()
    html = de_lu1(companyid)
    # open('/Users/xiaodi/Desktop/1.html','a').write(html)
    # print html
    # The mobile page embeds the per-request `code` in a hidden input.
    code = re.findall('type="hidden" value="(.+?)" id="hidden_code"', html)[0]
    # print code
    # API token = md5(company id + fixed app security key).
    securitykey = 'F460139A79494D11B94A474688685CF6'
    m2 = hashlib.md5()
    src = companyid + securitykey
    m2.update(src)
    token = m2.hexdigest()
    print '~~~~~~~~~~~~~~~~~', companyid
    print '~~~~~~~~~~~~~~~~~~~~~', code
    print '~~~~~~~~~~~~~~~~~~~~~~~~~~', token
    print proxy_ip
    # Fetch the detail payload, rotating proxies on exception or 403.
    try:
        basicList = detail(companyid, code, token, proxy=proxy_ip)
    except:
        for proxy in get_proxy():
            try:
                basicList = detail(companyid, code, token, proxy)
            except:
                continue

            if '<title>403 Forbidden</title>' in basicList:
                continue
            else:
                break
    if '<title>403 Forbidden</title>' in basicList:
        for proxy in get_proxy():
            basicList = detail(companyid, code, token, proxy)
            if '<title>403 Forbidden</title>' in basicList:
                continue
            else:
                break

    # print basicList
    # print type(basicList)
    # `num` counts how many proxies were burned on 'Invalid request' retries.
    num = 0
    if basicList == 'Invalid request':
        num = 0
        for proxy in get_proxy():
            try:
                num += 1
                basicList = detail(companyid, code, token, proxy)
            except Exception, ex:
                print ex
                continue
            if '<title>400 Bad Request</title>' in basicList or '<title>403 Forbidden</title>' in basicList or basicList == 'Invalid request':
                continue
            else:
                break
                # print '#'*100
                # return ''
    print '*' * 100
    print num
    print basicList
    print '*' * 100
    # Strip the jsonp4(...) wrapper, then parse the JSON inside it.
    basicList = re.findall('jsonp4\(([\s\S]+)\)', basicList)[0]
    # basicList
    basicList = json.loads(basicList)
    # print json.dumps(basicList,ensure_ascii =False,indent=4)


    # --- Flatten the registration fields from the detail payload ----------
    enterpriseName = basicList['data']['name']
    frName = basicList['data']['oper_name']
    regNo = ''
    regCap = basicList['data']['regist_capi']
    regCapCur = ''
    openFrom = basicList['data']['term_start']
    esDate = basicList['data']['start_date']
    openTo = basicList['data']['end_date']
    enterpriseType = basicList['data']['econ_kind']
    enterpriseStatus = basicList['data']['status']
    cancelDate = basicList['data']['end_date']
    revokeDate = basicList['data']['end_date']
    try:
        address = json.dumps(basicList['data']['addresses'][0]['address'], ensure_ascii=False)
    except:
        # No address in the payload: fall back to an empty JSON string.
        address = '""'
    last_update_time = basicList['data']['last_update_time']
    operateScope = basicList['data']['scope']
    abuItem = ''
    cbuItem = ''
    operateScopeAndForm = basicList['data']['scope']
    regOrg = basicList['data']['belong_org']
    ancheYear = ''
    ancheDate = ''
    industryPhyCode = ''
    industryPhyName = ''
    industryCode = ''
    industryName = ''
    recCap = ''
    oriRegNo = ''
    # --- Shareholders: JSON array built by string concatenation -----------
    # NOTE(review): a partner without real_capi_items RESETS the whole
    # accumulated list (shareHolderList = '') instead of being skipped --
    # looks like a bug; confirm before changing.
    shareHolderList = ''
    for i in range(len(basicList['data']['partners'])):
        str1 = ''
        data = basicList['data']['partners'][i]
        shareholderName = data['stock_name']
        shareholderType = data['stock_type']
        fundedRatio = data['stock_percent']
        country = ''
        # NOTE(review): fundedRatio is immediately overwritten with '' --
        # the stock_percent read above is discarded.
        fundedRatio = ''
        try:
            subConam = data['real_capi_items'][0]['real_capi']
            regCapCur = data['real_capi_items'][0]['invest_type']
            conDate = data['real_capi_items'][0]['real_capi_date']
            str1 = '{"shareholderName":"%s","subConam":"%s","regCapCur":"%s","conDate":"%s","fundedRatio":"%s","country":"%s","shareholderType":"%s"}' % (
            shareholderName, subConam, regCapCur, conDate, fundedRatio, country, shareholderType)
            shareHolderList = str1 + ',' + shareHolderList
        except:
            subConam = ''
            regCapCur = ''
            conDate = ''
            shareHolderList = ''

    shareHolderList = '[%s]' % shareHolderList.rstrip(',')
    # print shareHolderList

    # --- Key personnel -----------------------------------------------------
    personList = ''
    for i in range(len(basicList['data']['employees'])):
        str1 = ''
        data = basicList['data']['employees'][i]
        position = data['job_title']
        name1 = data['name']
        sex = data['sex']
        str1 = '{"position":"%s","name":"%s","sex":"%s"}' % (position, name1, sex)
        personList = str1 + ',' + personList
    personList = '[%s]' % personList.rstrip(',')
    # Sections not provided by this API: emitted as empty JSON arrays.
    punishBreakList = '[]'
    punishedList = '[]'
    alidebtList = '[]'
    entinvItemList = '[]'
    frinvList = '[]'
    frPositionList = '[]'
    # --- Change records ----------------------------------------------------
    alterList = ''
    for i in range(len(basicList['data']['changerecords'])):
        str1 = ''
        data = basicList['data']['changerecords'][i]
        altDate = data['change_date']
        altItem = data['change_item']
        altBe = data['before_content']
        altAf = data['after_content']
        str1 = '{"altDate":"%s","altItem":"%s","altBe":"%s","altAf":"%s"}' % (altDate, altItem, altBe, altAf)
        alterList = str1 + ',' + alterList
    alterList = '[%s]' % alterList.rstrip(',').replace('\n', '').replace('\r', '')
    # print alterList
    # --- Branch offices ----------------------------------------------------
    filiationList = []
    for i in basicList['data']['branches']:
        temp_dict = {}
        temp_dict['brName'] = i['name']
        temp_dict['brRegno'] = i['reg_no']
        temp_dict['brPrincipal'] = ''
        temp_dict['cbuItem'] = ''
        temp_dict['brAddr'] = i['belong_org']
        filiationList.append(temp_dict)
    filiationList = '%s' % (json.dumps(filiationList, ensure_ascii=False))
    # print filiationList
    # companyLaws = getLaws(name.encode('utf8'),code,token)
    # if companyLaws == 'Invalid request':
    #     print companyLaws
    #     laws = []
    # else:
    #     companyLaws = json.loads(companyLaws)
    #     laws = json.dumps(companyLaws['data'],ensure_ascii=False)
    sharesFrostList = ''
    sharesImpawnList = ''
    morDetailList = ''
    morguaInfoList = ''
    liquidationList = ''
    yearReportList = ''

    # NOTE(review): the "reNo" key below looks like a typo for "regNo";
    # kept as-is since downstream consumers may depend on the exact key.
    companybasicList = '''
    {"enterpriseName":"%s",
    "frName":"%s",
    "reNo":"%s",
    "regCap":"%s",
    "regCapCur":"%s",
    "esDate":"%s",
    "openFrom":"%s",
    "openTo":"%s",
    "enterpriseType":"%s",
    "enterpriseStatus":"%s",
    "cancelDate":"%s",
    "revokeDate":"%s",
    "address":%s,
    "abuItem":"%s",
    "cbuItem":"%s",
    "operateScope":"%s",
    "operateScopeAndForm":"%s",
    "regOrg":"%s",
    "ancheYear":"%s",
    "ancheDate":"%s",
    "industryPhyName":"%s",
    "industryCode":"",
    "industryName":"",
    "recCap":"",
    "oriRegNo":""}
    ''' % (enterpriseName, frName, regNo, regCap, regCapCur, esDate, openFrom, openTo, enterpriseType, enterpriseStatus,
           cancelDate, revokeDate, address, abuItem, cbuItem, operateScope, operateScopeAndForm, regOrg, ancheYear,
           ancheDate, industryPhyName)
    # print companybasicList

    # return json.loads(companybasicList)
    # companyName = name
    # companynews = news(str(name))
    # companynews = json.loads(companynews)
    # #companynews = json.dumps(companynews['items'])
    # jobs = job(companyid,code,token)
    # print jobs
    # jobs = json.loads(jobs)
    # print jobs
    # jobs = json.dumps(jobs['data']['items'],ensure_ascii=False)
    # for i in companynews['items']:
    #     print type(i)
    #     i['content'] = i['content'].replace('&quot','')
    #     i['title'] = i['title'].replace('&quot','')
    #     i['author'] = i['author'].replace('&quot','')
    # companynews = json.dumps(companynews,ensure_ascii=False)
    # companyrelated = related(companyid,code,token)
    # print companyrelated

    # allstr='{"companyName":"%s","province":"","basicList":[{%s}],"shareHolderList":%s,"personList":%s,"punishBreakList":[],"punishedList":[],"alidebtList":[],"entinvItemList":[],"frinvList":[],"frPositionList":[],"alterList":%s,"filiationList":%s,"caseInfoList":[],"sharesFrostList":[],"sharesImpawnList":[],"morDetailList":[],"morguaInfoList":[],"liquidationList":[],"yearReportList":[],"laws":%s,"jobs":%s,"news":%s}'%(name,companybasicList,shareHolderList,personList,alterList,filiationList,laws,jobs,companynews)
    allstr = '{"companyName":"%s","province":"","basicList":[%s],"shareHolderList":%s,"personList":%s,"punishBreakList":[],"punishedList":[],"alidebtList":[],"entinvItemList":[],"frinvList":[],"frPositionList":[],"alterList":%s,"filiationList":%s,"caseInfoList":[],"sharesFrostList":[],"sharesImpawnList":[],"morDetailList":[],"morguaInfoList":[],"liquidationList":[],"yearReportList":[],"laws":"","jobs":"","news":""}' % (
    name, companybasicList, shareHolderList, personList, alterList, filiationList)
    # print allstr
    # allstr='{"companyName":"%s","province":"","basicList":[%s],"shareHolderList":[],"personList":[],"punishBreakList":[],"punishedList":[],"alidebtList":[],"entinvItemList":[],"frinvList":[],"frPositionList":[],"alterList":[],"filiationList":[],"caseInfoList":[],"sharesFrostList":[],"sharesImpawnList":[],"morDetailList":[],"morguaInfoList":[],"liquidationList":[],"yearReportList":[],"laws":"","jobs":"","news":""}'%(name,companybasicList.strip())

    # return eval(allstr.replace('\n','').replace('<p>','').replace('</p>',''))
    # SECURITY NOTE: eval on remote-derived text -- see docstring.
    return eval(allstr)


# company_list(u'誉存')



search = index

if __name__ == '__main__':
    # Ad-hoc manual test: scrape one well-known company and dump the result.
    # result = company_list(u'阿里巴巴')
    # num=0
    # print result

    #    print company_list(u'360')
    #    print i
    # Test note (translated): three places were commented out: 256 286 19i
    # company_list(u'誉存')
    result = index(u'百度在线网络技术(北京)有限公司')

    print json.dumps(result, ensure_ascii=False, indent=4)
    # print i
    # print json.dumps(result,ensure_ascii=False,indent=4)
    # print '*'*50
    # print json.dumps(result,ensure_ascii=False,indent=4)

    # print company_list(u'360')
    # print json.dumps(result,ensure_ascii=False,indent=4)
