import datetime
import time

from urllib.error import HTTPError


import urllib.request
import urllib.parse

import scrapy
from lxml import etree
import ssl
from dominate.tags import label

from baiduspider.db.mongodb import mongodb
from baiduspider.model import baseItem
from baiduspider.model.baiduItem import baiduItem
from baiduspider.model.tianyaItem import tianyaItem


class zhihuSpider(scrapy.Spider):
    """Scrapy spider that fetches a Baidu Baike article page and prints its title."""

    name = 'wordSpider'
    allowed_domains = ['baike.baidu.com']
    # Disable TLS certificate verification process-wide so HTTPS fetches
    # succeed against hosts with problematic certificates.
    ssl._create_default_https_context = ssl._create_unverified_context
    start_urls = ['https://baike.baidu.com/item/' + urllib.parse.quote('三星')]

    def parse(self, response):
        print(111)
        # Extract the page title from the document head.
        title = response.xpath('/html/head/title/text()').extract_first()
        print(title)


def query(url):
    """Fetch the Tianya board listing at *url* and return the thread links.

    Parameters:
        url: absolute URL of a board listing page.

    Returns:
        List of href values (relative paths) for every thread anchor found
        in the listing table cells with class "td-title faceblue".
    """
    html = queryHtml(url)
    # Original also extracted the anchor texts into an unused `sen_list`;
    # only the hrefs are consumed by the caller, so just return those.
    link_list = html.xpath('//td[contains(@class,"td-title faceblue")]//a/@href')
    return link_list

def queryHtml(url):
    """GET *url* (certificate verification disabled) and return a parsed lxml tree.

    Parameters:
        url: absolute URL to fetch.

    Returns:
        lxml `_Element` root of the parsed HTML document.

    Raises:
        urllib.error.HTTPError / URLError: propagated from the request.
    """
    # Some target hosts have certificate problems; skip verification globally.
    ssl._create_default_https_context = ssl._create_unverified_context
    # Browser-like User-Agent so the site does not reject the request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    req = urllib.request.Request(url=url, headers=headers, method='GET')
    # Context manager guarantees the connection is closed even if read()
    # fails (original leaked the response object). The previous
    # `except HTTPError: raise` was a no-op and has been removed.
    with urllib.request.urlopen(req) as response:
        text = response.read().decode('utf-8')
    return etree.HTML(text)
def queryContent(url):
    """Scrape every post in the Tianya thread at *url*.

    Parameters:
        url: absolute URL of a thread page.

    Returns:
        A single-element list wrapping a list of ``[poster, text]`` pairs,
        preserving the original ``[[...]]`` layout expected by callers
        (``queryContent(url)[0]``).
    """
    html = queryHtml(url)
    posts = html.xpath('//div[contains(@class,"atl-item")]')
    author = str(queryAuthor(url)[0])

    pairs = []
    for post in posts:
        # Post body markup varies across pages; try known containers in order.
        text = post.xpath('.//div[contains(@class,"bbs-content")]//text()')
        if not text:
            text = post.xpath('.//div[contains(@class,"bbs-content")]//br/text()')
        if not text:
            text = post.xpath('.//div[contains(@class,"content")]//text()')
        if not text:
            text = post.xpath('.//div[contains(@class,"text")]//text()')

        replyer = post.xpath('.//span[contains(text(),"作者")]//a//text()')
        # Fall back to the thread author when no replyer name is present
        # (use `is None`, not `== None`).
        if not replyer or replyer[0] is None:
            poster = author
        else:
            poster = replyer[0]
        # BUG FIX: the original indexed text[0] unconditionally in the
        # named-replyer branch and crashed with IndexError when a post had
        # no extractable text; reuse the same marker string as the
        # anonymous case instead.
        content = str(text[0]) if text else '记录出错'
        pairs.append([poster, content])

    return [pairs]
def queryTitle(url):
    """Return the list of <title> text nodes of the page at *url*.

    HTTP errors from the underlying request propagate to the caller; the
    original wrapped the call in a `try/except HTTPError: raise` that
    added nothing and has been removed.
    """
    html = queryHtml(url)
    return html.xpath('//title//text()')
def queryAuthor(url):
    """Return the thread starter's name node(s) from the Tianya page at *url*."""
    page = queryHtml(url)
    # The original poster is marked by a span whose text contains "楼主".
    return page.xpath('//span[contains(text(),"楼主")]//a//text()')

def getNextPage(url):
    """Return the href list of the "next page" anchor on the listing at *url*."""
    page = queryHtml(url)
    return page.xpath('//a[contains(text(), "下一页")]/@href')
if __name__ == '__main__':
    # Crawl the Tianya "feeling" board page by page, persisting every post
    # of every thread to MongoDB.
    nextPage = 'http://bbs.tianya.cn/list-feeling-1.shtml'
    docIndex = 1
    while nextPage is not None:
        result = query(nextPage)
        # BUG FIX: getNextPage() returns an empty list on the last page; the
        # original indexed [0] unconditionally and crashed with IndexError,
        # so the `nextPage is not None` exit condition was never reached.
        nextLinks = getNextPage(nextPage)
        if nextLinks:
            nextPage = 'http://bbs.tianya.cn/' + str(nextLinks[0])
            print('=============curpage========' + nextPage)
        else:
            nextPage = None
        for link in result:
            newlink = 'http://bbs.tianya.cn/' + link
            author = str(queryAuthor(newlink)[0])
            # Renamed from `list` to avoid shadowing the builtin.
            posts = queryContent(newlink)[0]
            title = str(queryTitle(newlink)[0])
            newTitle = title.replace('_情感天地_论坛_天涯社区', '')
            print(newlink)
            print('文章：==========' + newTitle)
            print('楼主：' + author)
            docIndex = docIndex + 1
            for text in posts:
                replyer = str(text[0])
                # Strip the site's fixed leading whitespace run from each post.
                content = str(text[1]).replace('\r\n\t\t\t\t\t\t\t\u3000', '')
                b = tianyaItem()
                b.dbname = 'tianya'
                b.colname = 'feeling'
                b.docIndex = docIndex
                b.author = author
                b.replayer = replyer
                b.docUrl = newlink
                b.timestate = datetime.datetime.now()
                b.title = str(newTitle)
                b.content = content.strip()
                if b.replayer == b.author:
                    print('楼主：【' + content.strip() + '】')
                else:
                    print(b.replayer + '评论到：【' + content.strip() + '】')
                mongodb.save(b)

    print(docIndex)




