import requests, xlwt
from lxml import etree


class News(object):
    """Scrape the campus-news listing on www.zjitc.net and export every
    article's metadata (title, author, source, publish time, view count)
    to an .xls spreadsheet."""

    def __init__(self):
        # First page of the "campus news" (校园新闻) listing; run() follows
        # the "next page" links from here.
        self.url = 'http://www.zjitc.net/xwzx/xyxw.htm'
        # Desktop Chrome UA so the site serves the normal HTML pages.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36'
        }

    def get_data(self, url):
        """Fetch *url* and return the raw response body as bytes."""
        response = requests.get(url, headers=self.headers)
        return response.content

    def _resolve_link(self, href):
        """Turn a relative listing href (e.g. '../info/1070/123.htm') into
        an absolute site URL.

        Fix: the original used str.lstrip('../'), which strips a *character
        set* ('.', '/'), not the literal prefix — it would also eat leading
        dots/slashes that are part of the path.  Remove whole '../'
        prefixes instead.
        """
        path = str(href)
        while path.startswith('../'):
            path = path[3:]
        return 'http://www.zjitc.net/' + path

    def parse_data(self, data):
        """Parse one listing page.

        :param data: raw page bytes (UTF-8 HTML).
        :returns: ``(data_list, next_url)`` — a list of per-article dicts
            from :meth:`look_news`, and the absolute URL of the next
            listing page or ``None`` on the last page.
        """
        html = etree.HTML(data.decode())
        data_list = []
        for el in html.xpath('//div[@class="right-1"]/ul/li/a/@href'):
            data_list.append(self.look_news(self._resolve_link(el)))

        # The "下页" (next page) anchor is absent on the last page, so the
        # [0] index raises IndexError there — that is the only expected
        # failure; do not swallow anything else (was a bare `except:`).
        try:
            path = str(html.xpath('//a[contains(text(), "下页")]/@href')[0])
            if path.count('/') > 0:
                # Link already carries a sub-path (e.g. 'xyxw/2.htm').
                next_url = 'http://www.zjitc.net/xwzx/' + path
            else:
                next_url = 'http://www.zjitc.net/xwzx/xyxw/' + path
        except IndexError:
            next_url = None

        return data_list, next_url

    def _click_count_url(self, url):
        """Build the dynclicks.jsp URL whose response body is the article's
        view count; the click id is the numeric file name of *url*
        (e.g. '.../1070/12345.htm' -> '12345')."""
        click_id = url[url.rfind('/') + 1:url.rfind('.')]
        return ('http://www.zjitc.net/system/resource/code/news/click/dynclicks.jsp?clickid='
                + click_id + '&owner=1207641073&clicktype=wbnews')

    def look_news(self, url):
        """Fetch one article page and return its metadata.

        :returns: dict with keys ``title``, ``author``, ``from`` (source),
            ``time`` (publish time), ``look`` (view count as text).
        """
        response = requests.get(url, headers=self.headers)
        html = etree.HTML(response.content.decode())

        # The view counter is served by a separate JSP endpoint.
        look_data = requests.get(self._click_count_url(url), headers=self.headers).text

        title = str(html.xpath('//h2')[0].xpath('./text()')[0]).split()[0]
        head = html.xpath('//div[@class="zz"]')
        # The fixed slice offsets drop the field labels — presumably
        # '作者:'/'来源:' (3 chars) and '发布时间:' (5 chars); verify against
        # the live page markup if the site layout changes.
        head1 = str(head[0].xpath('./text()')[0]).split()
        author = head1[0][3:]
        from_w = head1[1][3:]
        head2 = str(head[1].xpath('./text()')[0]).split()
        create_time = head2[0][5:] + ' ' + head2[1]

        return {'title': title, 'author': author, 'from': from_w,
                'time': create_time, 'look': look_data}

    def run(self, max_pages=14, out_path='a.xls'):
        """Crawl up to *max_pages* listing pages (default 14 — the limit the
        original hard-coded) and save all articles to *out_path*."""
        wb = xlwt.Workbook(encoding='utf-8')
        sheet = wb.add_sheet(u'新闻', cell_overwrite_ok=True)
        for col, header in enumerate(('主题', '作者', '来源', '发布时间', '浏览量')):
            sheet.write(0, col, header)

        next_url = self.url
        row = 1
        for _ in range(max_pages):
            data_list, next_url = self.parse_data(self.get_data(next_url))

            for item in data_list:
                sheet.write(row, 0, item.get('title'))
                sheet.write(row, 1, item.get('author'))
                sheet.write(row, 2, item.get('from'))
                sheet.write(row, 3, item.get('time'))
                sheet.write(row, 4, item.get('look'))
                row += 1

            print(next_url)
            if next_url is None:
                # Last page reached before the page cap.
                break

        wb.save(out_path)


if __name__ == '__main__':
    # Kick off the full crawl-and-export run when executed as a script.
    News().run()
