#------------------------------------------#
# !python3
# coding:utf-8
# Author: Terry
# Description: 抓取推荐词典页面的词典信息
#------------------------------------------#

import urllib.request
import re
from bs4 import BeautifulSoup
from urllib.error import URLError
from urllib.error import HTTPError


# Basic steps to obtain a response: define the URL, fetch, then parse below.
url='http://cidian.haidii.com/center.html' # the page to request (recommended-dictionaries listing)

#print( res )


#region --片段epic
#endregion

#region --使用beautifulSoup 抽取数据

#获取属性
    # soup.a.attrs 获取得到所有属性和值，是一个字典
    # soup.a.attrs['href'] 获取指定的属性值
    # soup.a['href'] 简写形式

# 获取文本
    # soup.a.string
    # soup.a.text
    # soup.a.get_text()
      # .get_text()会清除你正在处理的html内容的所有标签，因此建议到最后一步再使用，以最大化的利用bs4的筛选功能。

# select方法
    # id选择器 #dudu
    # 类选择器 .xixi
    # 标签选择器 div a h1

try:
    # Fetch the page and parse it into a soup tree; the context manager
    # guarantees the HTTP response is closed even if parsing raises.
    with urllib.request.urlopen(url) as html:
        bs = BeautifulSoup(html.read(), 'html.parser')

    # Each recommended dictionary is an <a class="r_pic">: its "title"
    # attribute holds the dictionary name, its child <img> "src" the icon URL.
    #name_lists=bs.find_all('li',{"class":"ios android"})
    name_lists = bs.find_all('a', {"class": "r_pic"})

    result_list = []
    for item in name_lists:
        print(item.get('title'), '\t', item.find_all('img')[0]['src'])
        # BUG FIX: was item.get('tilte') — the typo made every appended
        # entry None, so the count below was right but all names were lost.
        result_list.append(item.get('title'))

    print("字典总数目：", len(result_list))

    # Data-cleaning/saving draft, intentionally disabled (kept for reference):
    """ pt_title=re.compile(r'<a.*?title="(.*?)".*?</a>',re.S|re.I)
    with open ("f:\\dics.txt",'a+')as f:
        for i in result_list:
            data=re.finditer(pt_title,i)
            f.write(data)       #保存数据
 """
except HTTPError as e:
    # Server responded with an HTTP error status (4xx/5xx).
    print(e)
except URLError as e:
    # Network-level failure (DNS resolution, refused connection, ...).
    print("url not found")
else:
    # Runs only when no exception occurred in the try body.
    print("Anyway it worked")
#endregion 

#region build function

"""
class Spider:

    def __init__(self):
        self.siteURL = 'http://cidian.haidii.com/center.html'

    def getPage(self, page_index=""):

        if page_index == "":
            url = self.siteURL
        else:
            url = self.siteURL + "?page=" + str(page_index)
        response = urllib.request.urlopen(url)
        return response.read().decode('utf-8')

    def getContents(self,page_index=""):
        page = self.getPage(page_index)
        pattern = re.compile('class="ios andorid".*?target="_blank"><img src>="(*?)" alt="(*?)">.*?<p>(*?)</p>',re.S)
        items = re.findall(pattern, page)
        for item in items:
            print (item)

spider = Spider()
spider.getContents()

"""
#endregion

#region --使用正则表达式

""" ptn=re.compile(r'<li class="ios android">[\s\S]*?class="r_pic" title="(.*?)" target="_blank"><img src="[\s\S]*?</li>',re.S)

name_links = re.compile(r'<li> class="ios android"[\s\S]*?title="(.*)" target=target="_blank"><img[\s\S]*?</li>',re.S)
src_links = re.compile(r'class="ios android"[\s\S].*<img src="(.*)" alt=.*?class="r_info"[\s\S].*?</li>',re.S)
info_links=re.compile(r'class="ios android"[\s\S].*<span></span>[\s\S].*<p>(.*)</p>[\s\S]*?<a.*?</li>',re.S)


r=re.match(ptn,res)
print(r) 
"""
# items =re.findall(ptn,res)
# print(len(items))
# print (items[:2])
# for i in items:
#     print(i)


#endregion


#region --使用xpath模块


#endregion

