
'''
Dependencies (install before running):
pip install requests
pip install BeautifulSoup4
pip install jupyter
python -m pip install mysql-connector
'''
import requests
# Fetch the Sina front page; force UTF-8 so the Chinese text decodes correctly.
res = requests.get('http://www.sina.com.cn/')
res.encoding = 'utf-8'
# print(res.text)  # uncomment to inspect the raw HTML
from datetime import datetime
from bs4 import  BeautifulSoup
# Inline HTML sample used for the BeautifulSoup selector demos below.
# NOTE: the backslashes are line continuations, so this is ONE single-line
# string literal (the spaces before each backslash are part of the string).
html_sample=' \
<html> \
<body>   \
    <h1 id="title">Hello World!</h1> \
    <a href="http://www.baidu.com" class="link1">This is link</a> \
    <a href="http://www.sougou.com" class="link2">2019-03-24 17:52    \
        <a href="" class="timecla"><span class="time">时间:2019</span></a>    \
    </a> \
</body> \
</html>'

# Parse the inline sample document with the stdlib HTML parser backend.
soup = BeautifulSoup(html_sample, 'html.parser')

# All <h1> elements; this sample contains exactly one.
h1_tags = soup.select('h1')
print('h1标签:', h1_tags)   # the whole result list
print(h1_tags[0])           # the tag itself: <h1 id="title">Hello World!</h1>
print(h1_tags[0].text)      # inner text only: Hello World!

# Every <a> tag in the sample (the nested .timecla anchor is included too).
anchor_tags = soup.select('a')
print('alink内容:', anchor_tags)
# Only the tags carrying class="link1".
link1_tags = soup.select(".link1")
print('包含class=link1标签所有内容', link1_tags)

# Print each anchor's href attribute value.
for anchor in anchor_tags:
    print(anchor['href'])

print(anchor_tags[1])
# The first child node of the second anchor is its raw timestamp text.
raw_time = anchor_tags[1].contents[0].strip()
print(datetime.strptime(raw_time, '%Y-%m-%d %H:%M'))
# strip('时间:') removes the characters 时/间/: from both ends, leaving '2019'.
print(soup.select('.link2 .timecla span')[0].text.strip('时间:'))

# Extract the article id from a Sina news URL.
# The original used .rstrip('.shtml').lstrip('doc-i'), but rstrip/lstrip
# remove *character sets*, not literal suffixes/prefixes — an id starting
# with d/o/c/i (or ending in s/h/t/m/l) would be silently mangled.
url = 'http://news.sina.com.cn/c/nd/2019-03-24/doc-ifxvctcc8121092.shtml'
filename = url.split('/')[-1]                    # 'doc-ifxvctcc8121092.shtml'
# Slice off the literal 'doc-i' prefix and '.shtml' suffix.
# NOTE(review): `id` shadows the builtin; kept for script compatibility.
id = filename[len('doc-i'):-len('.shtml')]
print(id)




'''==============爬取贝壳新房房源信息===================='''
import mysql.connector
from pip._vendor.distlib import database

def add(name, price, position):
    """Insert one house listing into the springboot.houseInfo table.

    Args:
        name: estate name text (houseName column).
        price: average price text (housePrice column).
        position: location text (position column).
    """
    # NOTE(review): credentials are hard-coded; move them to config/env
    # before any real deployment.
    mydb = mysql.connector.connect(
      host="localhost",
      user="root",
      passwd="root",
      database="springboot"
    )
    try:
        mycursor = mydb.cursor()
        try:
            # Parameterized query: the driver escapes the values safely.
            sql = "INSERT INTO houseInfo (houseName, housePrice,position) VALUES (%s, %s,%s)"
            mycursor.execute(sql, (name, price, position))
            mydb.commit()
        finally:
            mycursor.close()
    finally:
        # The original opened a fresh connection per call and never closed
        # it, leaking one connection per inserted row.
        mydb.close()

# Scrape the first 10 pages of Beike (贝壳) new-home listings for Chongqing,
# print a summary, and insert each listing into MySQL via add().
house_lines = []
for page in range(1, 11):
    url = 'https://cq.fang.ke.com/loupan/pg' + str(page)
    html = requests.get(url)
    html.encoding = 'utf-8'
    soup1 = BeautifulSoup(html.text, 'html.parser')

    # One listing card per '.resblock-list'; its count bounds the rows,
    # matching the original's loop limit.
    count = len(soup1.select('.resblock-list'))
    # Run each page-wide select ONCE (the original re-ran all three inside
    # the inner loop, re-scanning the whole document for every row).
    names = soup1.select('.name ')[:count]
    prices = soup1.select('.number')[:count]
    locations = soup1.select('.resblock-location')[:count]

    # zip stops at the shortest list, so a page where the selector counts
    # disagree no longer raises IndexError as the original could.
    for name_tag, price_tag, loc_tag in zip(names, prices, locations):
        house_name = name_tag.text
        house_price = price_tag.text
        house_pos = loc_tag.text.strip()
        house_lines.append('楼盘名称:' + house_name + ',均价:' + house_price + ',位置:' + house_pos)
        add(house_name, house_price, house_pos)

# Single join instead of quadratic string += inside the loop; each entry
# keeps its trailing newline as before.
houseInfo = ''.join(line + '\n' for line in house_lines)
print(houseInfo)

"""
爬虫学习视频 :https://study.163.com/course/courseMain.htm?courseId=1003285002
爬虫详细步骤:https://www.jianshu.com/p/01af5cfcc522

1.时间字符串转换
            导入from datetime import datetime
    1.字符串转时间
    dt = datetime.strptime(timeParam,'%Y年%m月%d日 %H:%M')
    2.时间转字符串
    dt.strftime('%Y-%m-%d')
    
2.<span class="span"><p>1</p><p>2</p><p>3</p></span>
    soup.select('.span p')[:-1]
            可以将最后一个p标签去掉[<p>1</p>,<p>2</p>]
    article=[]
    for p in soup.select('.span p')[:-1]:
        article.append(p.text.strip())
    print(article)
    >>['1', '2']

            上面相当于下面
    article = [p.text.strip() for p in soup.select('.span p')[:-1]]

    '\n'.join(article)
    >>'1\n2'
3.<a href="">账号:hhl</a>
     soup.select('a')[0].text.strip('账号:')
    >>hhl
    (select返回列表,需先取[0];text是属性不是方法)

4.取json里面的数据
    import json
    comments.text内容：var data={"result":{"status":{"msg":"","code":0},"count":{"total":100}}}
    jd = json.loads(comments.text.strip('var data='))
    jd['result']['count']['total']

5.抓取网页地址部分内容
    url = 'http://news.sina.com.cn/c/nd/2019-03-24/doc-i222222.shtml'
    id = url.split('/')[-1].rstrip('.shtml').lstrip('doc-i')
    >>222222
    (注意:rstrip/lstrip按字符集合删除而非按子串,若id以d/o/c/i开头会被误删,更稳妥的写法见第6条的正则)
6.使用正则表达式
    import re
    m = re.search('doc-i(.*).shtml',url)
    print(m.group(1))
    >>222222
7.如何将某字符替换到想要的位置
    url = 'http://news.sina.com.cn/c/nd/2019-03-24/doc-i{}.shtml'
    url.format('33333')
    print(url.format('33333'))
    >>http://news.sina.com.cn/c/nd/2019-03-24/doc-i33333.shtml
    
8.使用Pandas整理资料
    import pandas
    df = pandas.DataFrame(news_total)
    df.head();


"""





    
    
    