# -*- coding: utf-8 -*-
import requests
import xlwt
import xlrd
import datetime

from lxml import etree
from get_proxies import hasPoxies



# if __name__ == '__main__':
# print('**********************************即将进行抓取**********************************')
# keyword = input('请输入您要搜索的语言类型：')
# fin_result = []  # 将每页的招聘信息汇总成一个最终的招聘信息
# max_page_num = read_max_page(read_page(url, 1, keyword))
# for page_num in range(1, max_page_num):
# print('******************************正在下载第%s页内容*********************************' % page_num)
# page = read_page(url, page_num, keyword)
# page_result = read_tag(page, tag)
# fin_result.extend(page_result)
# file_name = input('抓取完成，输入文件名保存：')
# save_excel(fin_result, tag_name, file_name)
# endtime = datetime.datetime.now()
# time = (endtime - starttime).seconds
# print('总共用时：%s s' % time)
# Module-level accumulators: each page scraped by zbjData.spiderPage() is
# appended here via mergeList(), then flushed to Excel once the last page
# has been processed (see zbjData.getInfo / saveXls).
HREF = []    # listing detail-page URLs
TITLE = []   # task titles
PRICE = []   # offered prices
DES = []     # task descriptions
STATUS = []  # task status labels


class zbjData(object):
    """Scraper for zhubajie (task.zbj.com) brand-design task listings.

    Pages are fetched sequentially; extracted fields are accumulated in the
    module-level lists (HREF, TITLE, PRICE, DES, STATUS) and written to an
    .xls file once the last page has been merged.
    """

    def get_url(self, fileName, totalPage):
        """Generate the listing URL for each page and scrape it.

        fileName: category name; only '品牌设计' (brand design) is handled.
        totalPage: number of pages to fetch (page numbers are 1-based).
        """
        if fileName == '品牌设计':
            for i in range(totalPage):
                url = 'http://task.zbj.com/t-ppsj/page{}.html'.format(i+1)
                self.spiderPage(url, i+1, totalPage, fileName)

    def spiderPage(self, url, pageNum, totalPage, fileName):
        """Download one listing page, extract the task fields, and merge them.

        Returns None when url is None. Network or parse failures are reported
        and skipped so one bad page does not abort the whole run.
        """
        if url is None:
            return None
        # FIX: removed the unused proxy machinery — hasPoxies() was called on
        # every page (an extra network round-trip to xicidaili) but its result
        # was never passed to requests.get(), so it had no effect.
        user_agent = ('Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 '
                      'Core/1.53.4295.400')
        headers = {'User-Agent': user_agent}
        try:
            htmlText = requests.get(url, headers=headers).text
            selector = etree.HTML(htmlText)
            # element: demand-list container for the page's task entries
            tds = selector.xpath(
                '/html/body/div/div[5]/div[3]/div[1]/div[2]/div[1]')
            for td in tds:
                href = td.xpath('./div/a/@href')
                title = td.xpath('./div/a/p[1]/span[last()-2]/text()')
                price = td.xpath('./div/a/p[2]/b/text()')
                des = td.xpath('./div/a/p[3]/text()')
                status = td.xpath('./div/a/p[1]/span[last()-1]/text()')

                print("------正在爬取第%s页数据,共%s页数据...------" % (pageNum, totalPage))
                self.mergeList(href, title, price, des, status, pageNum, totalPage, fileName)
        except Exception as exc:
            # FIX: was a bare `except:` that swallowed every error silently
            # (including KeyboardInterrupt). Catch Exception and report it.
            print('读取异常', exc)

    def mergeList(self, href, title, price, des, status, pageNum, totalPage, fileName):
        """Append one page's field lists to the global accumulators.

        When the final page has been merged, hand everything to getInfo()
        for assembly and saving.
        """
        global HREF
        global TITLE
        global PRICE
        global DES
        global STATUS
        HREF += href
        TITLE += title
        PRICE += price
        DES += des
        STATUS += status
        if pageNum >= totalPage:
            print("数据已经爬取完成正在合并数据...")
            self.getInfo(HREF, TITLE, PRICE, DES, STATUS, fileName)

    def getInfo(self, href, _title, _price, des, status, fileName):
        """Zip the parallel field lists into row dicts and save them.

        FIX: the original indexed every list by range(len(href)), which raised
        IndexError whenever one xpath matched fewer nodes than the hrefs did;
        zip() pairs the fields safely (truncating to the shortest list).
        """
        arr = [
            {'href': h, 'title': t, 'price': p, 'des': d, 'status': s}
            for h, t, p, d, s in zip(href, _title, _price, des, status)
        ]
        self.saveXls(arr, fileName)

    def saveXls(self, data, fileName):
        """Write the scraped rows to '<fileName>-<YYYY-MM-DD>.xls'.

        data: list of dicts with keys title/price/des/status/href.
        """
        rowTitle = [u'标题', u'价格', u'描述', u'状态', u'地址']
        rowDatas = data
        # Column order: maps each header above to its dict key.
        _arr = ["title", "price", "des", "status", "href"]
        # Create workbook and sheet
        workbook = xlwt.Workbook(encoding='utf-8')
        data_sheet = workbook.add_sheet('demo')

        # Header row (row 0)
        for j in range(0, len(rowTitle)):
            data_sheet.write(0, j, rowTitle[j])

        # Data rows start at row 1
        for k in range(0, len(rowDatas)):
            for m in range(0, len(_arr)):
                data_sheet.write(k+1, m, rowDatas[k][_arr[m]])

        # FIX: was strftime('%Y-%m-%d %H:%M:%S')[0:10] — slicing a longer
        # format to get the date; format the date directly instead.
        nowTime = datetime.datetime.now().strftime('%Y-%m-%d')

        workbook.save('%s-%s.xls' % (fileName, nowTime))
        print("数据爬取结束, 文件%s-%s.xls已保存在当前目录下" % (fileName, nowTime))

    def runSpider(self, fileName, totalPage):
        """Entry point: scrape `totalPage` pages of the given category."""
        self.get_url(fileName, totalPage)


# FIX: the guard was `if '_main_':` — a non-empty string literal is always
# truthy, so the scrape ran unconditionally, even when this module was merely
# imported. Use the standard module-name check.
if __name__ == '__main__':
    # 'http://task.zbj.com/t-ppsj/page{}.html'.format(i+1)  # 品牌设计 (brand design)
    # https://task.zbj.com/page1.html?i=3  # IT software
    zbj = zbjData()
    zbj.runSpider('品牌设计', 100)
