import scrapy
import urllib.request
import urllib.parse
from lxml import etree
import ssl

from baiduspider.db.mongodb import mongodb
from baiduspider.model import baseItem
from baiduspider.model.baiduItem import baiduItem


class SpiderSpider(scrapy.Spider):
    """Scrapy spider that fetches a Baidu Baike entry page and prints its title."""

    name = 'spider'
    allowed_domains = ['baike.baidu.com']
    # NOTE(review): this mutates a global on the ssl module (disables certificate
    # verification process-wide) as a class-body side effect — acceptable for a
    # scratch crawler, but worth isolating for anything production-facing.
    ssl._create_default_https_context = ssl._create_unverified_context
    # Non-ASCII entry names must be percent-encoded to form a valid URL.
    start_urls = ['https://baike.baidu.com/item/' + urllib.parse.quote('三星')]

    def parse(self, response):
        """Extract the page <title> text and print it."""
        title = response.xpath('/html/head/title/text()').extract_first()
        print(title)


def query(content):
    """Fetch the Baidu Baike page for *content* and return its summary text.

    As a side effect (mirroring the original behaviour) this prints the
    entry's polysemant (disambiguation) keys and its catalog links.

    Args:
        content: Term to look up; may contain non-ASCII characters.

    Returns:
        The lemma-summary text nodes joined into one string and stripped of
        surrounding whitespace; an empty string when no summary is found.
    """
    # Non-ASCII terms must be percent-encoded in the URL path.
    url = 'http://baike.baidu.com/item/' + urllib.parse.quote(content)

    # NOTE(review): disables certificate verification process-wide — a module
    # global side effect, kept from the original implementation.
    ssl._create_default_https_context = ssl._create_unverified_context

    # A browser-like User-Agent avoids being served a bot-blocked page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    req = urllib.request.Request(url=url, headers=headers, method='GET')

    # Close the HTTP response deterministically instead of leaking the socket.
    with urllib.request.urlopen(req) as response:
        text = response.read().decode('utf-8')

    html = etree.HTML(text)

    # Summary paragraphs; the two class names cover old and new page layouts.
    sen_list = html.xpath('//div[contains(@class,"lemma-summary") or contains(@class,"lemmaWgt-lemmaSummary")]//text()')

    # Polysemant list: print real entries, skipping the '▪' bullet markers.
    keylist = html.xpath('//ul[contains(@class,"polysemantList-wrapper cmn-clearfix")]//li//text()')
    for key in keylist:
        if key != '▪':
            print(key)

    # Catalog (table-of-contents) link texts; skip empty strings.
    catalogs = html.xpath('//div[contains(@class,"catalog-list column-4")]//a/text()')
    for cat in catalogs:
        if cat:
            print(cat)

    return ''.join(sen_list).strip()


def main():
    """Look up a hard-coded term, print its summary, and persist it to MongoDB."""
    content = '金属'  # swap for input('查询词语：') to query interactively
    result = query(content)
    print("查询结果：", result)
    print('end...')

    # Persist the (title, summary) pair via the project's MongoDB wrapper.
    item = baiduItem()
    item.title = content
    item.desc = result
    mongodb.save(item)


if __name__ == '__main__':
    main()
