'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
#作者：cacho_37967865
#博客：https://blog.csdn.net/sinat_37967865
#文件：BeautifulSoup_model.py
#日期：2018-02-04
#备注：介绍BeautifulSoup4库
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

from bs4 import BeautifulSoup
import requests


# Fetch the article page and parse it with BeautifulSoup using the lxml parser.
# A timeout keeps the script from hanging forever if the server never answers.
response = requests.get('http://www.xingmeng365.com/articles.asp?id=7', timeout=10)
response.encoding = 'gb2312'   # the site serves GB2312-encoded Chinese text
html_doc = response.text
soup = BeautifulSoup(html_doc, "lxml")

# print(soup) would pretty-print the whole parsed document in standard form.
# Attribute access (soup.<tag>) returns the FIRST occurrence of that tag.
for tag_name in ("head", "title", "h1"):
    print(getattr(soup, tag_name))
print(soup.title, soup.h1)       # title and h1 together on one line

for tag_name in ("p", "img", "a"):
    print(getattr(soup, tag_name))

# get_text() extracts a tag's text content (getText is the same method).
for tag_name in ("title", "h1", "p", "span"):
    print(getattr(soup, tag_name).get_text())

# All <img> tags as a list-like ResultSet.
print(soup.find_all("img"))

# First tag whose id attribute is "link3" (None if no such tag exists).
print(soup.find(id='link3'))

# Content of the page's <meta name="description"> tag. Guard against the
# tag being absent: subscripting the None returned by find() would raise
# a TypeError and kill the script.
description_tag = soup.find(attrs={"name": "description"})
if description_tag is not None:
    print(description_tag['content'])

# All tags carrying class "list" (class_ avoids clashing with the keyword).
print(soup.find_all('div', class_='list'))


# Print the href of every link on the page (prints None for an <a> without href).
for link in soup.find_all('a'):
    print(link.get('href'))

# Print the src of every image on the page.
for img in soup.find_all('img'):
    print(img.get('src'))

# Print only the image links under the site's /upload/image directory.
# Guard against a missing src attribute: testing `"..." in None` would
# raise a TypeError, so skip tags where get('src') returns None.
for img in soup.find_all('img'):
    src = img.get('src')
    if src and "/upload/image" in src:
        print(src)


# Print the text content of every <p> tag, line by line.
for paragraph in soup.find_all('p'):
    print(paragraph.get_text())

