"""
1、北京高校排名，按“社会影响”、“升序”排序显示所有数据(文件名：高校排名a.py)
地址：http://www.gaosan.com/gaokao/43980.html
"""
# -*- coding:utf8 -*-  # NOTE: ineffective here — PEP 263 encoding declarations must be on line 1 or 2 (harmless on Python 3, which defaults to UTF-8)

import urllib.request
from bs4 import BeautifulSoup


def adjustStr(content):
    content.replace('')


# 链接地址解析-------------------------------------------------------------------------
url = "http://www.gaosan.com/gaokao/43980.html"

HttpResponseObject = urllib.request.urlopen(url)
# print(HttpResponseObject)
strHtml=HttpResponseObject.read()
# 构建beautifulsoup实例
soup = BeautifulSoup(strHtml.decode('utf-8'), "lxml")
# 第一个参数是要匹配的内容
# 第二个参数是beautifulsoup要采用的模块，即规则

# 以上都是固定套路

# 找到第一层标签------------------------<table width="580px" align="center">------------------------------------------------
data = soup.find_all("table", {"width":"580px", "align":"center"} )
                                    # find_all返回满足条件的【所有】结果
                                    # find返回满足条件的【第1个】结果
# 这样把整个表都提取了，而这个表只有一份，故len(data) == 1
# print(data)
# quit()
cnt = 0
table = []
title = []
for data1 in data:
    # print("len==={}".format(len(data)))
    # print(data1)
    # continue
    # print(len(data1))  # 1
    """
    for tr in data1:        #tr标签
        # print("tr:----------------------------------------------------------{}".format(tr))
        # continue
        lines = []
        i = 0
        for td in tr:       #td标签
            i = i + 1
            if (i == 1):
                title.append(td.text)
            else:
                print(td.text)
                # break
                str_ = td.text.replace(' ','').replace('\n','').replace('\r','').replace('\t','')
                print(str_)

                lines.append((td.text).strip())

        table.append(lines)
"""
print("table+++++++++++++++++++++++++++++==lines:")
print(len(table))
print(table)

