# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 09:40:44 2021

@author: hp
"""

import requests;
import scrapy;
from lxml import html
# --- Cell 1: fetch a book118 preview page and probe an absolute XPath ---
# NOTE(review): name/allowed_domains look like leftover Scrapy-spider settings;
# this cell actually uses plain requests + lxml, not a Scrapy crawler.
start_urls = 'https://max.book118.com/html/2019/0815/6153222123002054.shtm'
header = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"}
name = 'tbSpider'
allowed_domains = ['max.book118.com']

# Download with a browser-like User-Agent, then parse the body with lxml.
response = requests.get(start_urls, headers=header)
htmlresponse = html.fromstring(response.text)

# Absolute XPath copied from browser dev-tools — brittle, breaks on any
# page-layout change. Prints the list of matched elements (may be empty).
headlineNode = htmlresponse.xpath("/html/body/div[3]/div[1]/div[3]/div[2]/div[14]")
print(headlineNode)
#%%
import requests;
from bs4 import BeautifulSoup;
import os;

# Pool of browser User-Agent strings for outgoing requests.
# NOTE(review): defined but never referenced in this file — getUrl() below
# sends requests with no custom headers; confirm whether this was meant
# to be passed as headers={"User-Agent": ...}.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
    ]

def getUrl(url):
    """Fetch *url* and return the decoded response body as text.

    On any request failure, returns the literal string "连接失败"
    ("connection failed") instead of raising, preserving the original
    best-effort contract of this script.
    """
    try:
        read = requests.get(url)
        read.raise_for_status()  # turn 4xx/5xx responses into exceptions
        # Prefer the encoding sniffed from the body over the (often
        # missing or wrong) charset in the HTTP headers.
        read.encoding = read.apparent_encoding
        return read.text
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit and hid programming errors.
    except requests.RequestException:
        return "连接失败"
def getPic(html):
    """Locate the <div class="preview-bd"> container in *html* and print it.

    Prints None when the container is absent from the page.
    """
    soup = BeautifulSoup(html, "html.parser")
    preview = soup.find('div', attrs={'class': 'preview-bd'})
    print(preview)

# Fetch the preview page and print its image container.
# NOTE: runs network I/O at module import time.
html_url = getUrl("https://max.book118.com/html/2019/0815/6153222123002054.shtm");
getPic(html_url);
#%%
import requests 
from lxml import html
import pandas 

# --- Cell 3: collect headline links from the CNBC front page ---
from urllib.parse import urljoin  # stdlib; local to this script cell

url = "http://www.cnbc.com/"
response = requests.get(url)
doc = html.fromstring(response.text)

# Each headline container is expected to hold one <a href=...> link.
headlineNode = doc.xpath('//div[@class="headline"]')

url_list = []
for node in headlineNode:
    url_node = node.xpath('./a/@href')
    if url_node:
        # urljoin correctly resolves root-relative hrefs ("/news/...")
        # and leaves absolute hrefs intact; the previous `url + href`
        # concatenation produced "http://www.cnbc.com//news/..." and
        # corrupted already-absolute links.
        url_list.append(urljoin(url, url_node[0].strip()))
print(url_list)
# (Removed two bare `len(...)` expression statements — no-op REPL
# leftovers with no effect in a script.)
#%%
def getUrl(url):
    """Fetch *url* and return the decoded page text.

    Returns the literal string "连接失败！" ("connection failed!") on any
    request error rather than raising.

    NOTE(review): redefines the getUrl from the cell above; only this
    version is live after the module finishes importing.
    """
    try:
        read = requests.get(url)        # fetch the URL
        read.raise_for_status()         # raise on 4xx/5xx status codes
        # Decode using the encoding sniffed from the content, which is
        # more reliable than the header-declared charset.
        read.encoding = read.apparent_encoding
        return read.text                # page body as a string
    # Narrowed from a bare `except:` that also caught KeyboardInterrupt
    # and masked genuine bugs.
    except requests.RequestException:
        return "连接失败！"
 
# Extract thumbnail image URLs from the page and print them.
def getPic(html):
    """Parse *html*, find the thumbnail <ul> container, and print each
    <img> tag's src attribute, one per line.

    Does nothing when the container is missing (layout change, or the
    fetch failed and *html* is an error string).
    """
    soup = BeautifulSoup(html, "html.parser")
    # Container class identified by inspecting the target page's markup.
    gallery = soup.find('ul', class_='thumbnail-group thumbnail-group-165 clearfix')
    if gallery is None:
        # Previously this crashed with AttributeError: NoneType has no
        # attribute 'find_all' whenever the <ul> was absent.
        return
    for img in gallery.find_all('img'):
        print(img['src'])  # src holds the thumbnail's URL
if __name__ == '__main__':
    # Download the icon search page and print every thumbnail image URL.
    page_text = getUrl("https://findicons.com/search/nature")
    getPic(page_text)