# -*- coding: utf-8 -*-
# @Time    : 2018/3/25 10:02
# @Author  : shiweixian
# @Description : pdf爬取及分析
import os
import re
import csv
import requests
from bs4 import BeautifulSoup


def get_cazy_info():
    """
    Scrape protein / organism entries from the CAZy GH11 "characterized" pages.

    Walks the first three result pages (100 rows each) and, for every table
    row, records the protein name, the organism name and link, and — when
    present — the UniProt or GenBank accession link (UniProt preferred).

    :return: list of dicts with keys 'protein_name', 'org_name', 'org_link',
             and optionally 'link' / 'name' (accession URL and identifier)
    """
    # BUG FIX: the original sent a misspelled 'Acccept' header, which servers ignore
    headers = {'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
               "Cache-Control": "max-age=0", "Host": "www.cazy.org", "Proxy-Connection": "keep-alive",
               "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.146 Safari/537.36"}
    cookies = requests.get('http://www.cazy.org/', headers=headers).cookies
    base_url = "http://www.cazy.org/GH11_characterized.html?debut_FUNC="
    entries = []
    print("开始遍历CAZY网站")
    for page in range(3):  # three pages, 100 rows per page
        url = base_url + str(page * 100) + '#pagination_FUNC'
        print(url)
        soup = BeautifulSoup(requests.get(url, headers=headers, cookies=cookies).content, 'lxml')
        rows = soup.find(id='pos_onglet').find_all(valign="top")  # table rows (tr)

        for tr in rows:
            entry = {}
            row_soup = BeautifulSoup(str(tr), 'lxml')
            tds = row_soup.find_all(id="separateur2")
            protein_name = tds[0].text.strip()
            organism = str(tds[2])
            gen_bank = str(tds[3])
            uni_port = str(tds[4])
            # slice the organism link out of its href attribute
            link_start = organism.index('href="') + 6
            link_end = organism.index('"', link_start + 3)
            entry['protein_name'] = protein_name
            entry['org_name'] = tds[2].text.strip()
            entry['org_link'] = organism[link_start:link_end]

            # prefer the UniProt accession link when present
            if 'href="' in uni_port:
                link_start = uni_port.index('href="') + 6
                link_end = uni_port.index('"', link_start + 3)
                link = uni_port[link_start:link_end]
                entry['link'] = link
                # accession id is the last path component of the URL
                entry['name'] = link[link.rindex("/") + 1:]
                # entry['acid'] = get_uni_acid_sequences(headers, cookies, link)

            # otherwise fall back to the GenBank link
            elif 'href="' in gen_bank:
                link_start = gen_bank.index('href="') + 6
                link_end = gen_bank.index('"', link_start + 3)
                link = gen_bank[link_start:link_end]
                entry['link'] = link
                # accession id follows the 'val=' query parameter
                entry['name'] = link[link.index("val=") + 4:]
                # entry['acid'] = get_gen_acid_sequences(headers, cookies, link, entry['name'])
            entries.append(entry)
    print("遍历CAZY网站结束")
    return entries


def get_gen_acid_sequences(headers, cookies, url, name):
    """
    Fetch the amino-acid (FASTA) sequence for a GenBank entry from NCBI.

    :param headers: HTTP headers to send
    :param cookies: session cookies
    :param url: GenBank entry URL (followed through its redirects)
    :param name: GenBank accession, used to build the NCBI protein page URL
    :return: amino-acid sequence text with newlines removed
    """
    # base URL of the NCBI protein page
    protein_base = 'https://www.ncbi.nlm.nih.gov/protein/'
    url = str(requests.post(url, headers=headers, cookies=cookies).url).replace('amp;', '')
    print("链接1:" + url)
    url = str(requests.post(url, headers=headers, cookies=cookies).url).replace('amp;', '')
    print("链接2:" + url)
    print(protein_base + name)
    content = str(
        BeautifulSoup(requests.get(protein_base + name, headers=headers, cookies=cookies).content, 'lxml').contents)
    print(content)
    # extract the numeric NCBI uid embedded in the page source
    uid_tag = 'ncbi_uid='
    id = content[content.index(uid_tag) + len(uid_tag):content.index('&', content.index(uid_tag))]
    # endpoint returning the FASTA sequence text
    acid_link = "https://www.ncbi.nlm.nih.gov/sviewer/viewer.fcgi?db=protein&report=fasta&extrafeat=0&conwithfeat=on&retmode=html&withmarkup=on&tool=portal&log$=seqview&maxdownloadsize=1000000"
    # BUG FIX: the payload was the set {'id', id}; requests needs a mapping {'id': id}
    lines = str(BeautifulSoup(requests.post(acid_link, headers=headers, data={'id': id}, cookies=cookies).content,
                              'lxml').contents).split('\n')
    acid = ''.join(lines)
    print(acid)
    return acid


def get_uni_acid_sequences(headers, cookies, url):
    """
    Fetch the amino-acid (FASTA) sequence for a UniProt entry.

    :param headers: HTTP headers to send
    :param cookies: session cookies
    :param url: UniProt entry URL; '.fasta' is appended to get the sequence
    :return: amino-acid sequence text with newlines removed
    """
    fasta_suffix = ".fasta"
    soup = BeautifulSoup(requests.get(str(url) + fasta_suffix, headers=headers, cookies=cookies).content,
                         'lxml')
    content = str(soup.contents).split('\n')
    print(content)
    # joining the split lines removes all newlines (was a quadratic += loop)
    return ''.join(content)


def craw_ncbi(list):
    """
    Search PubMed (NCBI) for every CAZy entry and analyse the matching abstracts.

    Results are filtered by organism name, paginated 200 at a time, and the
    collected article links are stored on each entry under 'ncbi_title_links'
    before their abstracts are analysed into 'ncbi.csv'.

    :param list: entries produced by get_cazy_info()
    :return: None
    """
    write_csv_header('ncbi.csv')

    # strips HTML tags, keeping only text
    format = re.compile(r'<[^>]+>', re.S)

    root_url = 'https://www.ncbi.nlm.nih.gov/'
    headers = {'Host': 'www.ncbi.nlm.nih.gov',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
    cookies = requests.get('https://www.ncbi.nlm.nih.gov', headers=headers).cookies
    search_link = 'https://www.ncbi.nlm.nih.gov/pubmed/?term='
    current_page_size_key = 'EntrezSystem2.PEntrez.PubMed.Pubmed_ResultsPanel.Pubmed_Pager.CurrPage'
    search_data = {
        current_page_size_key: 1,
        'EntrezSystem2.PEntrez.PubMed.Pubmed_ResultsPanel.Pubmed_DisplayBar.PageSize': 200,
        'EntrezSystem2.PEntrez.PubMed.Pubmed_ResultsPanel.Pubmed_DisplayBar.LastPageSize': 200}
    # search for each protein entry
    for item in list:
        # use the organism name as the search keyword
        search_content = item['org_name']  # + '+' + item['protein_name']
        print(search_content)
        soup = BeautifulSoup(
            requests.post(search_link + search_content, cookies=cookies, data=search_data)
                .content, 'lxml')
        result_node = soup.find(id='resultcount')
        # ROBUSTNESS FIX: the result-count element may be missing (layout change
        # or error page); the original crashed with AttributeError here
        if result_node is None:
            continue
        result_count = result_node.attrs['value']
        print("数量：" + result_count)
        result_count = int(result_count)
        title_links = []
        # no results
        if result_count <= 0:
            continue
        # results span more than one page
        elif result_count > 200:
            page_total = result_count // 200 + 1
            # visit every page from first to last
            for page in range(1, page_total + 1):
                search_data[current_page_size_key] = page
                analyse_ncbi_list(root_url, search_link, search_content, cookies, search_data,
                                  title_links)
        # results fit on one page
        else:
            search_data[current_page_size_key] = 1
            analyse_ncbi_list(root_url, search_link, search_content, cookies, search_data, title_links)

        item['ncbi_title_links'] = title_links

        # analyse the collected abstracts
        analyse_ncbi_abstract(item, cookies, format)


def analyse_ncbi_list(root_url, search_link, search_content, cookies, data, title_links):
    """
    Parse one page of NCBI search results and collect matching article links.

    :param root_url: site root used to absolutise the result hrefs
    :param search_link: search page URL prefix
    :param search_content: search keywords; results whose title does not
                           contain them are discarded
    :param cookies: session cookies
    :param data: POST payload carrying the paging parameters
    :param title_links: output list — matching article URLs are appended in place
    :return: None
    """
    soup = BeautifulSoup(
        requests.post(search_link + search_content, cookies=cookies, data=data)
            .content, 'lxml')
    for node in soup.select('.rprt'):
        a = node.select_one('a')
        href = root_url + a.attrs['href']
        # keep only results whose title mentions the search keywords
        if search_content in a.text:
            title_links.append(href)


def analyse_ncbi_abstract(item, cookies, format):
    """
    Fetch each collected NCBI article page and analyse its abstract.

    :param item: entry dict holding the 'ncbi_title_links' URL list
    :param cookies: session cookies
    :param format: compiled regex that strips HTML tags from the abstract
    :return: None
    """
    csv_filename = 'ncbi.csv'
    for link in item['ncbi_title_links']:
        page = BeautifulSoup(requests.get(link, cookies=cookies).content, 'lxml')
        # abstract text with markup stripped
        abstract = format.sub('', str(page.find('abstracttext')))
        # document title
        title = page.select_one('.rprt_all').find(name='h1').text
        print("文档标题：" + title)
        # link to the full document
        document_url = 'http:' + page.select_one('.aux').find(name='a').get('href')
        print('文档链接：' + document_url)
        analyse_abstract(csv_filename, abstract, item, document_url, title)


def craw_web_science(list):
    """
    Search Web of Science for every CAZy entry and download matching PDFs.

    For each (organism, protein) pair a general search is submitted; every
    result page is walked and each document's outbound link is resolved and
    downloaded via pdf_download(), stopping once five PDFs exist in the
    pair's folder under d:/data/webOfScience2/.

    :param list: entries produced by get_cazy_info()
    :return: None
    """
    csv_filename = 'webofscience3.csv'
    write_csv_header(csv_filename)

    base_url = 'http://apps.webofknowledge.com/'
    headers = {'Accept': '*/*',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'}
    res_step1 = requests.get('http://www.webofknowledge.com?&Error=Client.NullSessionID', headers=headers)
    cookies = res_step1.cookies
    # the session id (SID) is embedded in the redirected URL
    sid = str(res_step1.url).split('&')[2].split('=')[1]
    cookies['SID'] = '\"' + sid + '\"'
    cookies['CUSTOMER'] = "\"Shandong University\""
    cookies['E_GROUP_NAME'] = "\"Shandong University\""

    url = 'http://apps.webofknowledge.com/UA_GeneralSearch.do'
    data = {'fieldCount': 2, 'action': 'search', 'product': 'UA', 'search_mode': 'GeneralSearch', 'SID': sid,
            'value(hidInput1)=&value(bool_1_2)': 'AND', 'value(select1)': 'TS', 'value(select2)': 'TS',
            'value(hidInput1)': '', 'value(hidInput2)': '', 'limitStatus': 'expanded', 'ss_lemmatization': 'On',
            'ss_spellchecking': 'Suggest', 'SinceLastVisit_UTC': '', 'period': 'Range Selection', 'range': 'ALL',
            'startYear': 1900, 'endYear': 2018, 'update_back2search_link_param': 'yes', 'ssStatus': 'display:none',
            'ss_showsuggestions': 'ON', 'ss_query_language': 'auto', 'ss_numDefaultGeneralSearchFields': 1,
            'rs_sort_by': 'PY.D;LD.D;SO.A;VL.D;PG.A;AU.A', 'max_field_notice': '注意: 无法添加另一字段。',
            'input_invalid_notice': '检索错误: 请输入检索词。', 'exp_notice': '检索错误: 专利检索词可在多个家族中找到 (',
            'input_invalid_notice_limits': '<br/>注: 滚动框中显示的字段必须至少与一个其他检索字段相组配。',
            'sa_params': 'UA||' + sid + '|http://apps.webofknowledge.com|'}
    # NOTE: abstract extraction/analysis was already disabled (commented out) in
    # the original; the dead abstract-URL machinery — which also kept appending
    # onto itself every document — has been removed.
    stop = False
    print("总共的个数：" + str(len(list)))
    index = 0
    for item in list:
        index += 1
        print("第 " + str(index) + " 个")
        count = 0
        organism = item['org_name']
        # drop parenthesised qualifiers from the protein name
        protein_name = re.sub(r'\(.*?\)', '', item['protein_name'])
        data['value(input1)'] = organism
        data['value(input2)'] = protein_name
        response = requests.post(url, data=data, cookies=cookies, headers=headers)
        response_url = response.url
        soup = BeautifulSoup(requests.get(response_url, cookies=cookies, headers=headers).content, 'lxml')
        contents = str(soup.contents)
        # BUG FIX: next_page_link / page_count / total_item_count were unbound
        # on the first iteration (or stale from the previous one) whenever the
        # page markers were missing; reset them for every entry.
        next_page_link = None
        page_count = 0
        total_item_count = 0
        if 'qid=' in contents:
            # qid needed for the paging URL
            qid = contents[contents.index('qid=') + 4:contents.index('&', contents.index('qid='))]
            next_page_link = 'http://apps.webofknowledge.com/summary.do?product=UA&parentProduct=UA&search_mode=GeneralSearch&parentQid=&qid=' + qid + '&SID=' + sid + '&sortBy=RS.D;PY.D;AU.A;SO.A;VL.D;PG.A&update_back2search_link_param=yes&page='
        # only present when the search produced results
        if 'handle_nav_final_counts' in contents:
            marker = 'handle_nav_final_counts'
            raw = contents[contents.index(marker) + len(marker):contents.index(')', contents.index(marker) + 5)]
            total_str, page_str = raw.replace('(', '').replace('\'', '').split(',')
            print('条数：' + total_str)
            print('页数：' + page_str)
            page_count = int(page_str)
            total_item_count = int(total_str)
        start_tag = 'OutboundService.do'
        limit = 10  # documents per page
        for i in range(1, page_count + 1):
            if stop:
                stop = False
                break
            if i != 1:
                if next_page_link is None:
                    break
                response_url = next_page_link + str(i)
                soup = BeautifulSoup(requests.get(response_url, cookies=cookies, headers=headers).content, 'lxml')
            if i == page_count:
                # the last page holds only the remaining documents
                limit = total_item_count - (i - 1) * 10
            for j in range(1, limit + 1):
                # absolute document number across pages
                doc = j + (i - 1) * 10
                openurl = str(soup.find(id='tmp_links_openurl_' + str(doc)))
                document_url = str(base_url + openurl[openurl.index(start_tag):openurl.index('\'', openurl.index(
                    start_tag) + 3)]).replace('amp;', '')
                try:
                    count += 1
                    folder_path = "d:/data/webOfScience2/" + organism.replace(" ", "") + "+" + protein_name.replace(
                        " ", "")
                    if not os.path.exists(folder_path):
                        os.makedirs(folder_path)
                    # stop once five documents already exist for this pair
                    if len(os.listdir(folder_path)) >= 5:
                        count = 0
                        stop = True
                        break
                    pdf_download(document_url, cookies, headers, folder_path, count)

                except requests.exceptions.ConnectionError:
                    print('连接被拒绝，出错链接：' + document_url)
                except PermissionError:
                    print('文件被占用')
                except requests.exceptions.ChunkedEncodingError:
                    print('连接超时,链接：' + document_url)
                except AttributeError:
                    # no download link could be located; undo the counter bump
                    count -= 1
                    print("获取不到下载链接")
                except ConnectionError:
                    print("断网")

def analyse_abstract(csv_filename, abstract, item, document_url='', title=''):
    """
    分析摘要，如果该摘要含有最佳PH等，则保存
    :param abstract:
    :param item:
    :param document_url: 论文文档所在网页的网址
    :return:
    """
    print('开始分析摘要')
    protein_name = re.sub('\(.*?\)', '', item['protein_name'])
    # 如果摘要中含有这个蛋白质的名称
    index = 0
    # if abstract.__contains__(protein_name):
    sentences = abstract.split('.')
    print('包含该蛋白质')
    index += 1
    if index % 10 == 0:
        print('第 ' + str(index) + ' 条')
    # 是否包含最佳PH、温度
    list = []
    for sentence in sentences:
        if sentence.__contains__(' optimum ') or sentence.__contains__(' optimal ') or sentence.__contains__(
                ' activity '):
            words = sentence.split(' ')
            print(sentence)
            dict = {'sentence': sentence}
            for i in range(len(words)):
                if 'ph' == words[i].lower() and words[i + 1] is not None and re.match(
                        '\d+(\.\d+)?(-)?(\d+\.)?(\d+)?', words[i + 1]):
                    pH = words[i + 1]
                    print('PH=' + pH)
                    dict['PH'] = pH
                if words[i] == 'degree' and re.match(
                        '\d+(\.\d+)?(-)?(\d+\.)?(\d+)?', words[i - 1]):
                    print('温度=' + words[i - 1])
                    dict['T'] = words[i - 1]
                elif words[i].__contains__('°C'):
                    T = words[i].replace('°C', '').replace(',', '').replace('.', '').strip()
                    print('温度=' + T)
                    dict['T'] = T
            if 'PH' in dict.keys() or 'T' in dict.keys():
                list.append(dict)

    if list.__len__() > 0:
        file = open(csv_filename, 'a', encoding='utf-8')
        writer = csv.writer(file)
        for dict in list:
            row = [item['org_name'], item['protein_name'], item['name'], dict['PH'] if 'PH' in dict.keys() else '',
                   dict['T'] if 'T' in dict.keys() else '', dict['sentence'], abstract, document_url, title]
            writer.writerow(row)
        file.close()


def pdf_download(document_url, cookies, headers, folder_path, count):
    """
    Resolve a Web of Science outbound link to the publisher site and download the PDF.

    Follows the library link-resolver redirects, then dispatches on the
    publisher domain found in the final URL to locate a direct PDF link and
    save it as '<count>.pdf' inside folder_path. Returns early (downloading
    nothing) when no link-resolver dropdown or PDF link can be found.

    :param document_url: outbound (OutboundService.do) link from a result page
    :param cookies: session cookies
    :param headers: HTTP headers to send
    :param folder_path: directory the PDF is saved into
    :param count: sequence number used as the PDF file name
    :return: None
    """

    step1_url = requests.get(document_url, cookies=cookies, headers=headers).url
    # step2_url = requests.get(step1_url, cookies=cookies, headers=headers).url
    # step3_url = requests.get(step2_url, cookies=cookies, headers=headers).url
    soup = BeautifulSoup(requests.get(step1_url, cookies=cookies, headers=headers).content, 'lxml')
    select = soup.find(id='linksToContent', name='select')
    if select is None:
        return
    # the second <option> of the resolver dropdown carries the target URL
    option = select.findAll(name='option')[1]
    url = option.attrs['value']
    step4_url = 'http://wa8fu8yt7b.search.serialssolutions.com' + str(url)[1:]
    step5_url = requests.get(step4_url, cookies=cookies, headers=headers).url
    # dispatch on the type of download page (publisher domain)
    if str(step5_url).__contains__('www.sciencedirect.com'):
        download_page_url = requests.get(step5_url, cookies=cookies,
                                         headers=headers).url
        print("文档下载页面链接1:" + download_page_url)
        soup = BeautifulSoup(requests.get(download_page_url, headers=headers).content, 'lxml')
        content = str(soup.contents)
        tag = 'pdfUrlForCrawlers'
        if not content.__contains__(tag):
            return
        start_index = content.index(tag) + tag.__len__() + 3
        # the crawler URL redirects; one more request is needed to reach the PDF
        pdf_url_1 = content[start_index:content.index('\"', start_index + 2)]
        soup = BeautifulSoup(requests.get(pdf_url_1, headers=headers).content, 'lxml')
        pdf_url = soup.find(name='div', id='redirect-message').find(name='a').attrs['href']
        print("PDF下载链接1：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'), pdf_url_1)
    elif str(step5_url).__contains__('springer'):
        print("文档下载页面链接2:" + step5_url)
        # Springer PDFs live under /content/pdf/<article-id>.pdf
        pdf_url = 'https://link.springer.com/content/pdf/' + step5_url[step5_url.rindex('/') + 1:] + '.pdf'
        print("PDF下载链接2：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('tandfonline'):
        print("文档下载页面链接3:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        pdf_url = 'https://www.tandfonline.com' + soup.find(class_='pdf-tab', name='li').find(name='a').attrs['href']
        print("PDF下载链接3：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('proquest'):
        print("文档下载页面链接4:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        pdf_url = soup.find(name='a', id='downloadPDFLink').attrs['href']
        print("PDF下载链接4：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('chinesesites'):
        print("文档下载页面链接5:" + step5_url)
        # no download link could be located for this site

    elif str(step5_url).__contains__('jbc.org'):
        print("文档下载页面链接6:" + step5_url)
        # JBC serves the PDF at the article URL plus '.full.pdf'
        pdf_url = step5_url + '.full.pdf'
        print("PDF下载链接6：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('ncbi.nlm.nih.gov'):
        print("文档下载页面链接7:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        nodes_a = soup.find(name='div', class_='format-menu')
        if nodes_a is None:
            return
        nodes_a = nodes_a.findAll(name='a')
        pdf_url = None
        # pick the (last) menu link whose href mentions 'pdf'
        for node in nodes_a:
            if node.attrs['href'] is not None and str(node.attrs['href']).__contains__('pdf'):
                pdf_url = 'https://www.ncbi.nlm.nih.gov' + node.attrs['href']
        if pdf_url is None:
            return
        print("PDF下载链接7：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('wa8fu8yt7b'):
        # resolver landed back on itself — nothing to download
        print("文档下载页面链接8:" + step5_url)
    elif str(step5_url).__contains__('pubs.acs'):
        print("文档下载页面链接9:" + step5_url)
        # rewrite .../doi/abs/... into .../doi/pdf/...
        pdf_url = str(step5_url).replace('abs', '').replace("https:/", 'https://').split(
            '/doi/')
        pdf_url = str(pdf_url[0] + '/doi/pdf/' + pdf_url[1]).replace("//", "/")
        print("PDF下载链接9：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('onlinelibrary'):
        print("文档下载页面链接10:" + step5_url)
        pdf_url = step5_url.replace('abs', 'pdf')
        print("PDF下载链接10：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('academic.oup'):
        print("文档下载页面链接11:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        pdf_url = 'https://academic.oup.com' + soup.find(name='a', class_='article-pdfLink').attrs['href']
        print("PDF下载链接11：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('bmcbiotechnol'):
        print("文档下载页面链接12:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        pdf_url = 'https:' + soup.find(name='a', id='articlePdf').attrs['href']
        print("PDF下载链接12：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('pubs.rsc'):
        print("文档下载页面链接13:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        pdf_url = 'http://pubs.rsc.org' + soup.find(name='div', id='DownloadOption').select_one('a').attrs['href']
        print("PDF下载链接13：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('search.sciencemag.org'):
        print("文档下载页面链接14:" + step5_url)
        # no downloadable link found for this site
    elif str(step5_url).__contains__('api.scitation.org'):
        print("文档下载页面链接15:" + step5_url)
        soup = BeautifulSoup(requests.get(step5_url, headers=headers).content, 'lxml')
        pdf_url = soup.find(name='div', class_='download-pdf').find(name='a').attrs['href']
        print("PDF下载链接15：" + pdf_url)
        download_pdf(pdf_url, os.path.join(folder_path, str(count) + '.pdf'))
    elif str(step5_url).__contains__('ieeexplore.ieee.org'):
        # no download handling implemented for IEEE
        print("文档下载页面链接16:" + step5_url)

    else:
        print("其他文档下载页面链接:" + step5_url)


def download_pdf(url, filename, refer_url=''):
    """
    Download a PDF to a local file.

    :param url: direct PDF URL
    :param filename: destination file path
    :param refer_url: value for the Referer header (some hosts require it)
    :return: None
    """
    headers_2 = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
        'Referer': refer_url}

    r = requests.get(url, stream=True, headers=headers_2)
    # stream to disk in chunks; r.content would defeat stream=True by
    # buffering the whole PDF in memory
    with open(filename, "wb") as code:
        for chunk in r.iter_content(chunk_size=8192):
            if chunk:
                code.write(chunk)


def write_csv_header(filename):
    """
    Create (or truncate) a CSV file and write the result header row.

    :param filename: path of the CSV file to initialise
    :return: None
    """
    csv_header = ['organism', 'protein', 'genBankOrUniPort', 'PH', 'temperature', 'sentence', 'section', 'document_url',
                  'title']
    # 'with' guarantees the file is closed; newline='' prevents csv from
    # emitting blank lines on Windows
    with open(filename, 'w', encoding='utf-8', newline='') as file:
        csv.writer(file).writerow(csv_header)


if __name__ == '__main__':
    # scrape the CAZy entries, then crawl Web of Science for each of them;
    # the guard prevents the crawl from running on import
    cazy_entries = get_cazy_info()
    # craw_ncbi(cazy_entries)
    craw_web_science(cazy_entries)
