import scrapy
import urllib.request
import urllib.parse
from lxml import etree
import ssl

from baiduspider.db.mongodb import mongodb
from baiduspider.model import baseItem
from baiduspider.model.baiduItem import baiduItem
from baiduspider.model.tianyaItem import tianyaItem


class zhihuSpider(scrapy.Spider):
    """Spider that fetches one Baidu Baike entry page and prints its title.

    NOTE(review): the class is called ``zhihuSpider`` but its ``name`` is
    'wordSpider' and it crawls baike.baidu.com — the naming looks stale.
    The class name is kept unchanged so external references still resolve.
    """

    name = 'wordSpider'
    allowed_domains = ['baike.baidu.com']
    # SECURITY NOTE: disables HTTPS certificate verification process-wide.
    ssl._create_default_https_context = ssl._create_unverified_context
    # Start from the Baike entry for "三星"; the CJK term must be URL-quoted.
    start_urls = ['https://baike.baidu.com/item/' + urllib.parse.quote('三星')]

    def parse(self, response):
        """Extract the page <title> text and print it."""
        context = response.xpath('/html/head/title/text()')
        # extract_first() returns the first matched text node or None.
        title = context.extract_first()
        print(title)


def query(url):
    """Fetch *url* and return the post-link hrefs from a Tianya list page.

    Returns a list of ``href`` strings taken from the anchors inside
    ``td`` cells whose class contains "td-title faceblue".
    """
    html = queryHtml(url)
    # The original also extracted the anchor texts into a separate list,
    # but never used them — only the link targets are collected now.
    link_list = html.xpath('//td[contains(@class,"td-title faceblue")]//a/@href')
    return link_list


def queryHtml(url):
    """GET *url* and parse the UTF-8 response body into an lxml element.

    Returns the ``lxml.etree._Element`` root of the fetched page.
    """
    # SECURITY NOTE: disables HTTPS certificate verification process-wide;
    # kept because the scraped hosts fail strict verification.
    ssl._create_default_https_context = ssl._create_unverified_context
    # Spoof a desktop-browser User-Agent so the site serves normal markup.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    # Build the request with the address and headers, then issue it.
    req = urllib.request.Request(url=url, headers=headers, method='GET')
    # Use a context manager so the connection is closed deterministically
    # (the original left the response object open).
    with urllib.request.urlopen(req) as response:
        text = response.read().decode('utf-8')
    # Parse the raw HTML text into an _Element tree.
    return etree.HTML(text)


def queryContent(url):
    """Scrape every floor of a Tianya thread into ``[[who, text], ...]``.

    Returns a one-element list whose first item is the list of
    ``[author_or_replyer, first_text_fragment]`` pairs, one per floor
    (callers index the result with ``[0]``). Floors whose body text
    cannot be located under any known layout are skipped — the original
    raised IndexError on them.
    """
    html = queryHtml(url)
    floors = html.xpath('//div[contains(@class,"atl-item")]')
    author = str(queryAuthor(url)[0])
    entries = []

    for floor in floors:
        # Try progressively looser selectors for the floor's body text.
        text = floor.xpath('.//div[contains(@class,"bbs-content")]//text()')
        if not text:
            text = floor.xpath('.//div[contains(@class,"content")]//br/text()')
        if not text:
            text = floor.xpath('.//div[contains(@class,"content")]//text()')
        if not text:
            text = floor.xpath('.//div[contains(@class,"text")]//text()')
        if not text:
            # No content under any known layout; skip instead of crashing.
            continue

        # Reply floors carry a "作者" ("author") span; the opening floor
        # does not, so fall back to the thread author.
        replyer = floor.xpath('.//span[contains(text(),"作者")]//a//text()')
        who = replyer[0] if replyer and replyer[0] is not None else author
        entries.append([who, str(text[0])])

    # Preserve the original nested return shape (everything in slot 0,
    # since the original's index counter was never incremented).
    return [entries]


def queryTitle(url):
    """Return the text nodes of the page's <title> element as a list."""
    return queryHtml(url).xpath('//title//text()')


def queryAuthor(url):
    """Return the link text inside the span marked "楼主" (thread starter)."""
    page = queryHtml(url)
    op_selector = '//span[contains(text(),"楼主")]//a//text()'
    return page.xpath(op_selector)





def getNextPage(url):
    """Return the href(s) of the "下一页" (next page) link, as a list."""
    page = queryHtml(url)
    return page.xpath('//a[contains(text(), "下一页")]/@href')


if __name__ == '__main__':
    # Scrape one Tianya thread and print each floor's replier and content.
    newlink = 'http://bbs.tianya.cn//post-feeling-4312866-1.shtml'

    # Renamed from `list` to avoid shadowing the builtin.
    posts = queryContent(newlink)[0]
    title = str(queryTitle(newlink)[0])
    # Strip the site-wide suffix appended to every thread title.
    newTitle = title.replace('_情感天地_论坛_天涯社区', '')
    author = str(queryAuthor(newlink)[0])

    print(newlink)
    print(newTitle)
    for entry in posts:
        # Remove the leading layout whitespace Tianya injects into bodies.
        content = str(entry[1]).replace('\r\n\t\t\t\t\t\t\t\u3000', '')
        content = content.strip()
        replyer = str(entry[0])
        item = tianyaItem()
        print(content)
        print(replyer)
        item.author = author
        # NOTE(review): the model field is spelled "replayer" — kept as-is.
        item.replayer = replyer
        item.title = str(newTitle)
        item.content = content.strip()
        # Persistence intentionally disabled:
        # mongodb.save(item)
