# @Time:2018.7.19
# @Author:menxging

import pymysql
import re
import json
import requests
from bs4 import BeautifulSoup
from lxml import etree
# from core500_words import CORE500WORDS


class SpiderIciba(object):
    """
    Crawl word data from http://www.iciba.com

    Target fields: 1) word; 2) soundmark (phonetic notation); 3) trans
    (translation); 4) part_of_speech; 5) example_sentense; 6) USA_pronunciation;
    7) UK_pronunciation; 8) word_root_or_affix; 9) synonymy; 10) antonym.
    Fields 5 and 8-10 are still TODO.
    """

    # Compiled once at class-definition time instead of on every parse_data
    # call: matches the .mp3 pronunciation links embedded in the page markup.
    MP3_PATTERN = re.compile(r'http://.*?\.mp3')

    def __init__(self):
        """
        Initialize crawler parameters: base URL and request headers.
        """
        self.base_url = 'http://www.iciba.com/'
        # Desktop browser UA string so the site serves the normal HTML page.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}

    def run_spider(self, word):
        """
        Fetch the dictionary page for one word.

        :param word: the word to look up (appended to base_url)
        :return: raw response body as bytes
        :raises requests.RequestException: on network failure or timeout
        """
        # Full URL for this word's page
        url = self.base_url + word
        # timeout guards against hanging forever on an unresponsive server
        response = requests.get(url=url, headers=self.headers, timeout=10)
        return response.content

    def parse_data(self, response):
        """
        Parse the page HTML and print the extracted target fields.

        :param response: raw HTML bytes returned by run_spider
        :return: None for now -- assembling and returning data_dict is TODO
        """
        # Parse the page content with BeautifulSoup backed by lxml
        soup = BeautifulSoup(response, 'lxml')

        # 1) word
        word = soup.select('h1[class="keyword"]')[0].get_text().strip()
        print("---" * 20)
        print("单词："+word)

        # 2) soundmark (phonetic notation)
        soundmark = ';'.join([item.get_text() for item in soup.select('.base-speak span span')])
        print("音标："+soundmark)

        # 3) translation
        trans = ''.join([item.get_text() for item in soup.select('li[class="clearfix"] span')])
        print("翻译："+trans)

        # 4) part of speech
        part_of_speech = ';'.join([item.get_text() for item in soup.select('li[class="clearfix"] span[class=prop]')])
        print("词性："+part_of_speech)

        # 5) example sentence -- TODO (selector for
        #    'div.artical div.article-section div.p-container p' not finished)

        # 6) US pronunciation: regex-filter the audio link out of the markup
        USA_pronunciation = self.MP3_PATTERN.search(str(soup.select('.base-speak span')[0])).group()
        print("美式发音:"+USA_pronunciation)

        # 7) UK pronunciation (third span in the same speak bar)
        UK_pronunciation = self.MP3_PATTERN.search(str(soup.select('.base-speak span')[2])).group()
        print("英式发音："+UK_pronunciation)

        # 8) word_root_or_affix -- TODO
        # 9) synonymy -- TODO
        # 10) antonym -- TODO

        # TODO: collect the fields above into a dict and return it
        # data_dict = {}
        # return data_dict

    # TODO: save_data(self, data_dict) -- persist results to MySQL via pymysql.
    # Notes for when this is implemented (the old commented draft had bugs):
    #   * commit with db.commit() -- a cursor object has no commit() method;
    #   * the SQL must be "INSERT INTO test_1 (...) VALUES (...)" -- no TABLE
    #     keyword -- and should use parameterized values, not string building;
    #   * close the cursor and connection in a finally block (or use a
    #     context manager) so they are released even on error.

    def start_spider(self, words=None):
        """
        Main loop: crawl, parse and (eventually) store every word.

        :param words: iterable of words to crawl; defaults to CORE500WORDS,
                      which requires the core500_words import at the top of
                      the file to be restored
        """
        if words is None:
            # Preserve the original behavior of crawling the core-500 list.
            words = CORE500WORDS
        # Crawl every word; a failure on one word must not abort the rest.
        for word in words:
            try:
                # Fetch the page, then extract the target fields
                response = self.run_spider(word)
                self.parse_data(response)

                # Store the data (pending save_data implementation)
                # self.save_data(data_dict)
            except Exception as e:
                # Skip words whose page fails to load or doesn't match the
                # expected layout, but say why instead of swallowing silently.
                print("解析 %s 失败: %s" % (word, e))


if __name__ == '__main__':
    # Entry point: build the spider and launch the crawl over the word list.
    SpiderIciba().start_spider()