# coding=utf-8
import urllib
import http.cookiejar
import urllib.request
from bs4 import BeautifulSoup
from lxml import etree
import  requests
import csv
from  datetime  import  *
import re
# Purpose: fetch the login page so the server issues the session cookie,
# and persist that cookie to a local file for the requests that follow.

# Login page URL
url = "http://120.236.178.42:8888/cars/"
# Request headers used for every request (mutated further below for later calls)
header = {
    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36"
}
# Build the initial GET request for the login page
request = urllib.request.Request(url = url,headers = header)

# Cookie jar that can save/load cookies in Mozilla cookies.txt format
cookie = http.cookiejar.MozillaCookieJar("cars.txt") # cookies are persisted to "cars.txt"

# Handler that attaches the cookie jar to urllib requests/responses
handler = urllib.request.HTTPCookieProcessor(cookie)

# Opener that records response cookies into the jar automatically
opener = urllib.request.build_opener(handler)

# Opening a request through this opener stores the response's cookies in the jar

response = opener.open(request)

# Persist the cookies to cars.txt

cookie.save(ignore_expires = True,ignore_discard = True)
    # ignore_discard=True: also save session cookies marked "discard"
    # ignore_expires=True: save cookies even if they have already expired

content = response.read()

# Parse the login page HTML

html = etree.HTML(content)  # NOTE(review): `html` is only referenced by commented-out xpath code below

"""
lt = html.xpath('//input[@name="lt"]')[0].attrib.get("value") #attrib 查看匹配标签所有的属性
execution = html.xpath('//input[@name="execution"]')[0].attrib.get("value") #attrib 查看匹配标签所有的属性
_eventId = html.xpath('//input[@name="_eventId"]')[0].attrib.get("value") #attrib 查看匹配标签所有的属性
 "lt":lt,
    "execution":execution,
    "_eventId":_eventId
"""
data = {
    "userName":"anlianbaoxian",
    "password":"123456",
    "Submit":""

}
#print(data)

sendData = urllib.parse.urlencode(data).encode(encoding='UTF8') #封装请求数据

login_url = "http://120.236.178.42:8888/cars/Login_login.do"

header["Referer"] = "http://120.236.178.42:8888/cars/login.jsp"

login_request = urllib.request.Request(url = login_url,data = sendData,headers = header)

#携带cookie进行请求
response = opener.open(login_request)

cookie.save(ignore_expires = True,ignore_discard = True)
"""
content = response.read()
print(content)
"""
# After the simulated login, page through the contract search results and
# append every data row to data1.csv.  Relies on the logged-in `opener`,
# the `cookie` jar and the `header` dict prepared above.
rows = []       # accumulated rows for the current page (was `list`, shadowing the builtin)
err_count = 0   # rows that failed to serialize to CSV
# Original note: page 38 used to cause problems (date cell without a match, see guard below).
for page in range(1277,1669):
    search_url = "http://120.236.178.42:8888/cars/Contract_toSearch.do"
    header["Referer"] = "http://120.236.178.42:8888/cars/Contract_toSearch.do"
    header["Host"] = "120.236.178.42:8888"
    header["Origin"] = "http://120.236.178.42:8888"
    # Search form: all filters empty, only the page number varies.
    page_data = {
        "adminId":"676",
        "queryDto.customerName":"",
        "queryDto.nameId":"0",
        "queryDto.carVin":"",
        "queryDto.beginDate":"",
        "queryDto.endDate":"",
        "queryDto.mobile":"",
        "queryDto.carCode":"",
        "queryDto.carId":"0",
        "branchName":"",
        "queryDto.branchId":"0",
        "queryDto.beginContDate":"",
        "queryDto.endContDate":"",
        "queryDto.idNumber":"",
        "queryDto.contractCode":"",
        "queryDto.seller":"0",
        "queryDto.contStatus":"0",
        "currentPage":page,
        "eachPage":"20"
        }

    send_data = urllib.parse.urlencode(page_data).encode(encoding='UTF8')  # form-encode the POST body
    page_request = urllib.request.Request(url = search_url, data = send_data, headers = header)
    page_response = opener.open(page_request)
    cookie.save(ignore_expires = True, ignore_discard = True)
    body = page_response.read()
    soup = BeautifulSoup(body.decode('UTF8'), "html5lib")
    # The second <tbody> on the page holds the result table.
    tbody = soup.find_all('tbody')[1]
    for idx, tr in enumerate(tbody.find_all('tr')):
        if idx in (0, 1):
            continue  # skip the two header rows
        row_id = tr["id"]  # was `id`, shadowing the builtin
        name = ''
        phone = ''
        car_name = ''
        vin_no = ''
        shop_name = ''
        sale = ''
        sign_date = ''
        detail_url = 'http://120.236.178.42:8888/cars/Contract_contractDetail.do?id=' + row_id
        # Column layout (0-based): 1=name, 2=phone, 3=car, 4=VIN, 7=signing date, 8=shop, 9=seller
        for tdx, td in enumerate(tr.find_all('td')):
            if tdx == 1:
                name = td.text
            elif tdx == 2:
                # Concatenate every digit run — strips separators from the phone cell.
                phone = ''.join(re.findall(r'\d+', td.text))
            elif tdx == 3:
                car_name = td.text
            elif tdx == 4:
                vin_no = td.text
            elif tdx == 7:
                mat = re.search(r"(\d{4}-\d{1,2}-\d{1,2})", td.text)
                # Guard: the original called mat.group(0) unconditionally and
                # crashed with AttributeError when the cell held no date.
                if mat:
                    sign_date = mat.group(0)
            elif tdx == 8:
                shop_name = td.text
            elif tdx == 9:
                sale = td.text.strip()
        rows.append([str(row_id), str(detail_url), str(name), str(phone), str(car_name),
                     str(vin_no), str(sign_date), str(shop_name), str(sale)])

    print(rows)
    # newline='' prevents the blank-line-per-row artifact on Windows;
    # an explicit utf-8 encoding avoids the UnicodeEncodeError that the
    # original try/except was merely counting instead of preventing.
    with open("data1.csv", "a+", newline='', encoding='utf-8') as datacsv:
        csvwriter = csv.writer(datacsv, dialect="excel")
        for item in rows:
            try:
                csvwriter.writerow(item)
            except Exception as err:
                err_count += 1
                print(item)
                print(err)
    rows = []
    print('写入'+str(page)+'结束')

print('出错总数'+str(err_count))



"""
dataUrl = "http://120.236.178.42:8888/cars/Contract_contractDetail.do?"
data={}
data['id']='31382'
url_parame=urllib.parse.urlencode(data)
all_url=dataUrl+url_parame

header['Host']='120.236.178.42:8888'
header['Accept']='text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
request1 = urllib.request.Request(url = all_url,headers = header)

urlResponse1= opener.open(request1)
cookie.save(ignore_expires=True, ignore_discard=True)
body1 = urlResponse1.read()
soup1 = BeautifulSoup(body1.decode('utf-8'), "html5lib")
print(soup1)
"""
"""
params={'id':'31367'}
r=requests.get("http://120.236.178.42:8888/cars/Contract_contractDetail.do?id=31367",params=params,cookies=cookie)
print(r.text)
"""






'''
urljoin ='Contract_contractDetail.do?id='+id;
跳转到订单详情页获取客户身份证以及地址电话等信息
'''

