# -*- coding: utf-8 -*-
from get_proxies import hasPoxies
import requests
import xlwt
import xlrd
from lxml import etree


def get_url():
    """Iterate over all listing pages and hand each URL to spiderPage.

    Pages are 1-based; ``i=3`` selects the IT/software category.
    (The brand-design category used to be
    'http://task.zbj.com/t-ppsj/page{}.html'.)
    """
    totalPage = 335
    for i in range(totalPage):
        # Fixed: the original template ended in a stray space, which was
        # percent-encoded into the request URL ("…?i=3%20").
        url = 'https://task.zbj.com/page{}.html?i=3'.format(i + 1)
        spiderPage(url, i + 1, totalPage)


def spiderPage(url, pageNum, totalPage):
    """Fetch one listing page, extract task rows and feed them to mergeList.

    url       -- listing page URL; None is skipped.
    pageNum   -- 1-based index of this page.
    totalPage -- total page count; mergeList flushes output on the last page.

    NOTE(review): the original also constructed ``hasPoxies(...)`` here but
    never used the result (proxies were commented out), so that dead proxy
    setup has been removed.
    """
    if url is None:
        return None

    # Spoof a desktop-browser User-Agent; the site presumably rejects the
    # default requests UA.
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 '
                  'Core/1.53.4295.400')
    headers = {'User-Agent': user_agent}

    try:
        htmlText = requests.get(url, headers=headers).text
        selector = etree.HTML(htmlText)

        # Rows of the "demand-list" element; this absolute path is brittle
        # and must be updated if the page layout changes.
        tds = selector.xpath(
            '/html/body/div/div[4]/div[5]/div[1]/div[2]/div[1]')

        for td in tds:
            # Each xpath returns a (possibly empty) list of matches.
            href = td.xpath('./div/a/@href')
            title = td.xpath('./div/a/p[1]/span/text()')
            price = td.xpath('./div/a/p[2]/b/text()')
            des = td.xpath('./div/a/p[3]/text()')
            status = td.xpath('./div/a/p[4]/span/text()')

            # Python 2: make the u'...' list repr human-readable for logging.
            _title = str(title).replace('u\'', '\'').decode("unicode-escape")
            print(_title)

            mergeList(href, title, price, des, status, pageNum, totalPage)
    except Exception as e:
        # Was a bare ``except:`` that swallowed everything (even
        # KeyboardInterrupt) and printed only 'error'; now reports context.
        print('error: page {} ({}): {}'.format(pageNum, url, e))


def spiderDetail(url):
    """Fetch a single task detail page and print its main fields.

    Every field falls back to '' when its XPath matches nothing.
    url -- detail page URL; None is skipped.
    """
    if url is None:
        return None
    try:
        htmlText = requests.get(url).text
        selector = etree.HTML(htmlText)
        # All fields live under the same widget container; hoist the prefix.
        base = '//*[@id="utopia_widget_10"]'
        aboutHref = selector.xpath(base + '/div[1]/div/div/div/p[1]/a/@href')
        price = selector.xpath(base + '/div[1]/div/div/div/p[1]/text()')
        title = selector.xpath(base + '/div[1]/div/div/h2/text()')
        contentDetail = selector.xpath(base + '/div[2]/div/div[1]/div[1]/text()')
        publishDate = selector.xpath(base + '/div[2]/div/div[1]/p/text()')
        # First match or empty string (conditional expression).
        aboutHref = aboutHref[0] if aboutHref else ''
        price = price[0] if price else ''
        title = title[0] if title else ''
        contentDetail = contentDetail[0] if contentDetail else ''
        publishDate = publishDate[0] if publishDate else ''
        print(aboutHref, price, title, contentDetail, publishDate)
    except Exception as e:
        # Was a bare ``except:`` printing only 'error2'; report the cause.
        print('error2: {} ({})'.format(url, e))


# Module-level accumulators: mergeList appends each page's scraped field
# lists here; getInfo combines them into rows once the last page is merged.
HREF = []
TITLE = []
PRICE = []
DES = []
STATUS = []


def mergeList(href, title, price, des, status, pageNum, totalPage):
    """Append one page's scraped field lists onto the module accumulators.

    When the final page has been merged (pageNum >= totalPage), hand the
    accumulated columns to getInfo for export.
    """
    global HREF, TITLE, PRICE, DES, STATUS
    HREF += href
    TITLE += title
    PRICE += price
    DES += des
    STATUS += status
    if pageNum >= totalPage:
        # Crawl finished -- merge the columns and write the spreadsheet.
        getInfo(HREF, TITLE, PRICE, DES, STATUS)
        # Fixed: was the Py2-only statement ``print "game over"``; the call
        # form prints identically under Python 2 and also works on Python 3.
        print("game over")


def getInfo(href, _title, _price, des, status):
    """Combine the five parallel field columns into row dicts and export them.

    Rows are indexed by ``href``'s length; a shorter sibling list raises
    IndexError, matching the positional lookup contract.
    """
    rows = []
    for idx in range(len(href)):
        rows.append({
            'href': href[idx],
            'title': _title[idx],
            'price': _price[idx],
            'des': des[idx],
            'status': status[idx],
        })
    saveXls(rows)


def saveXls(data, filename='it_ruanjian_335.xls'):
    """Write scraped rows into an .xls spreadsheet.

    data     -- list of dicts with keys title/price/des/status/href.
    filename -- output path; default keeps the original hard-coded name,
                so existing callers are unaffected.
    """
    # Column headers (Chinese): title, price, description, status, link.
    rowTitle = [u'标题', u'价格', u'描述', u'状态', u'地址']
    # Dict keys in the same order as the headers above.
    fieldOrder = ["title", "price", "des", "status", "href"]

    workbook = xlwt.Workbook(encoding='utf-8')
    data_sheet = workbook.add_sheet('demo')

    # Header row (row 0).
    for col, caption in enumerate(rowTitle):
        data_sheet.write(0, col, caption)

    # Data rows start at row 1, one column per field.
    for row, record in enumerate(data):
        for col, key in enumerate(fieldOrder):
            data_sheet.write(row + 1, col, record[key])

    workbook.save(filename)
    # Fixed: 'success' used to be printed BEFORE workbook.save(), so a
    # failed save still reported success; report only after writing.
    print("success")


if __name__ == '__main__':
    # Fixed: ``if '_main_':`` tested a non-empty (always truthy) string,
    # so the crawl also ran whenever this module was merely imported.
    get_url()


# import requests
# import xlwt
# import xlrd
# from lxml import etree


# class doubanBookData(object):

#     def __init__(self):
#         self.f = xlwt.Workbook()  # 创建工作薄
#         self.sheet1 = self.f.add_sheet(
#             u'图书列表', cell_overwrite_ok=True)  # 命名table
#         self.rowsTitle = [u'编号', u'图书链接', u'书名',
#                           u'评分', u'评分人数', u'图片链接']  # 创建标题
#         for i in range(0, len(self.rowsTitle)):
#             # 最后一个参数设置样式
#             self.sheet1.write(0, i, self.rowsTitle[i], self.set_style(
#                 'Times new Roman', 220, True))
#         # Excel保存位置
#         self.f.save('Book.xls')
#     # 该函数设置字体样式

#     def set_style(self, name, height, bold=False):
#         style = xlwt.XFStyle()  # 初始化样式
#         font = xlwt.Font()  # 为样式创建字体
#         font.name = name
#         font.bold = bold
#         font.colour_index = 2
#         font.height = height
#         style.font = font
#         return style

#     def getUrl(self):
#         for i in range(10):
#             url = 'https://book.douban.com/top250?start={}'.format(i*25)
#             self.spiderPage(url)

#     def spiderPage(self, url):
#         if url is None:
#             return None

#         try:
#             data = xlrd.open_workbook(
#                 'F:/Python/SPIDER/Book.xlsx')  # 打开Excel文件
#             table = data.sheets()[0]  # 通过索引顺序获取table，因为初始化时只创建了一个table，因此索引值为0
#             rowCount = table.nrows  # 获取行数   ，下次从这一行开始
#             proxies = {  # 使用代理IP，获取IP的方式在上一篇文章爬虫打卡4中有叙述
#                 'http': 'http://110.73.1.47:8123'}
#             user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
#             headers = {'User-Agent': user_agent}
#             respon = requests.get(url, headers=headers,
#                                   proxies=proxies)  # 获得响应
#             htmlText = respon.text  # 打印html内容
#             s = etree.HTML(htmlText)  # 将源码转化为能被XPath匹配的格式
#             trs = s.xpath(
#                 '//*[@id="content"]/div/div[1]/div/table/tr')  # 提取相同的前缀
#             m = 0
#             for tr in trs:
#                 data = []
#                 bookHref = tr.xpath('./td[2]/div[1]/a/@href')
#                 bookTitle = tr.xpath('./td[2]/div[1]/a/text()')
#                 bookScore = tr.xpath('./td[2]/div[2]/span[2]/text()')
#                 bookPeople = tr.xpath('./td[2]/div[2]/span[3]/text()')
#                 bookImg = tr.xpath('./td[1]/a/img/@src')
#                 # python的三目运算 :为真时的结果 if 判定条件 else 为假时的结果
#                 bookHref = bookHref[0] if bookHref else ''
#                 bookTitle = bookTitle[0] if bookTitle else ''
#                 bookScore = bookScore[0] if bookScore else ''
#                 bookPeople = bookPeople[0] if bookPeople else ''
#                 bookImg = bookImg[0] if bookImg else ''

#             # 拼装成一个列表
#                 data.append(rowCount+m)  # 为每条书添加序号
#                 data.append(bookHref)
#                 data.append(bookTitle)
#                 data.append(bookScore)
#                 data.append(bookPeople)
#                 data.append(bookImg)

#                 for i in range(len(data)):
#                     self.sheet1.write(rowCount+m, i, data[i])  # 写入数据到execl中

#                 m += 1  # 记录行数增量
#                 print(m)
#                 print(bookHref, bookTitle, bookScore, bookPeople, bookImg)

#         except Exception as e:
#             print('出错', type(e), e)

#         finally:
#             self.f.save('Book.xls')


# if '_main_':
#     dbBook = doubanBookData()
#     dbBook.getUrl()
