#coding:utf-8
from urllib import request
from urllib import parse
import json
import pymysql
import socket
import time



# Open the database connection.
# NOTE(review): credentials are hard-coded; move to env vars / config before reuse.
# Positional arguments to connect() were removed in PyMySQL 1.0 — use keywords.
# charset is set explicitly because the scraped dept names are Chinese text.
db = pymysql.connect(host="localhost",
                     user="root",
                     password="root",
                     database="zgcl",
                     charset="utf8")

# Cursor used for all INSERT statements below.
cursor = db.cursor()




# Endpoints:
#   Home page : https://service.cdpf.org.cn/api?method=zclWssp.home.index
#   Test page : http://sjcj.cdpf.org.cn/integral/page/index.jsp

# Page to crawl. Fail fast on stalled connections instead of hanging forever.
socket.setdefaulttimeout(20)
url = 'https://service.cdpf.org.cn/taiji/app/zclXzsp/module/xzz/xzz.main.jsp?sxlx=CJR&dealtype=2'


# Session cookie copied from a logged-in browser session.
# NOTE(review): this expires — the script must be re-run with a fresh cookie string.
cookie_str = r'JSESSIONID=995D6C834961B5F828E69829A1C14207; SERVERID=cjrfwptsr4; _tj_token_id_=b20cda4a144e4253a8f420acc4a0c971-20180706234129'

req = request.Request(url)
req.add_header('cookie', cookie_str)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36')

# Warm-up request to the main page; the response body is never used downstream,
# so close it immediately rather than leaking the connection.
resp = request.urlopen(req)
resp.close()


# Request headers copied from Chrome DevTools (Request Headers panel).
headers = {
    'Accept': 'text/plain, */*; q=0.01',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Referer': 'https://service.cdpf.org.cn/taiji/app/zclXzsp/module/xzz/xzz.main.jsp?sxlx=CJR&dealtype=2',
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.23 Mobile Safari/537.36'
}

# Tree endpoint: POSTing a nodeId returns that node's direct children.
url2 = 'https://service.cdpf.org.cn/api?method=zclXzsp.xzz.tree'
# Root node id to expand first — presumably Hangzhou (330100000000); confirm against the site.
data = {"nodeId": "330100000000"}

data = parse.urlencode(data).encode('utf-8')
req2 = request.Request(url2, headers=headers, data=data)
req2.add_header('cookie', cookie_str)

page = request.urlopen(req2).read().decode('utf-8')

pop_data = json.loads(page)
res_data = pop_data['res_data']
# Renamed from `list` so the builtin is not shadowed.
dept_list = res_data['list']


# First level ("qj"): collect district ids and an id -> name lookup.
qj_list_id = []
qj_list_name = {}

for item in dept_list:
    qj_list_id.append(item['id'])
    qj_list_name[item['id']] = item['dept_name']
print(qj_list_id)


# Second level: for each district, fetch its street/town ("item") children.
item_list_id = []        # all second-level node ids, in fetch order
item_list_name = {}      # item id -> item name
qj_item_list_name = {}   # item id -> parent district name

# `node_id` (not `id`) so the builtin is not shadowed.
for node_id in qj_list_id:
    payload = parse.urlencode({"nodeId": node_id}).encode('utf-8')
    req3 = request.Request(url2, headers=headers, data=payload)
    req3.add_header('cookie', cookie_str)
    page3 = request.urlopen(req3).read().decode('utf-8')
    # `children` (not `list`) so the builtin is not shadowed.
    children = json.loads(page3)['res_data']['list']

    for item in children:
        item_list_id.append(item['id'])
        item_list_name[item['id']] = item['dept_name']
        # Map each child back to its district's display name.
        qj_item_list_name[item['id']] = qj_list_name.get(item['parent_id'])
print("长度--------" + str(len(item_list_id)))
# Third level: expand each street node and insert its sub-departments
# into the `region` table.
# NOTE(review): the slice starts at index 100 — it looks like a manual resume
# point from an earlier interrupted run; confirm before reusing this script.
num = 0
for last_item_id in item_list_id[100:]:
    num += 1
    print(str(num))
    print(qj_item_list_name.get(last_item_id))
    print(item_list_name.get(last_item_id))

    payload = parse.urlencode({"nodeId": last_item_id}).encode('utf-8')
    req3 = request.Request(url2, headers=headers, data=payload)
    req3.add_header('cookie', cookie_str)
    page3 = request.urlopen(req3).read().decode('utf-8')
    # `children` (not `list`) so the builtin is not shadowed.
    children = json.loads(page3)['res_data']['list']

    # Throttle so the server is not hammered.
    time.sleep(1)

    # Parameterized INSERT (loop-invariant, so built once per node, not per row).
    sql = "INSERT INTO region(area_code,name,parent_name,qj_name) VALUES (%s,%s,%s,%s)"
    for item in children:
        try:
            # Execute and commit one row at a time so a single bad row
            # does not discard the whole batch.
            cursor.execute(sql, (item['dept_code'],
                                 item['dept_name'],
                                 item_list_name.get(last_item_id),
                                 qj_item_list_name.get(last_item_id)))
            db.commit()
        except Exception as e:
            # Log the failure, roll back this row, and keep going.
            print(e)
            db.rollback()

# Dump the collected street-name lookup as a final sanity check.
print(item_list_name)

# Release database resources now that all inserts are done.
cursor.close()
db.close()