import requests as req
import time
from bs4 import BeautifulSoup
import random


class Spider():
    """Crawler for the imooc course catalogue.

    Walks the paginated course list, scrapes each course's detail page,
    and appends one comma-separated line per course to ``mooc.log``.
    """

    def __init__(self):
        # Site root; detail links found on list pages are relative to this.
        self.start_url = "https://coding.imooc.com/"

    def download(self, url):
        """Fetch *url* and return the response body as text.

        Returns ``None`` when *url* is ``None`` or the server answers with
        a non-200 status.  Sleeps one second per real request as a crude
        politeness delay.
        """
        if url is None:
            return None
        # Throttle only actual requests (previously slept even for None).
        time.sleep(1)
        # Browser-like headers so the site does not reject the bot outright.
        header = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
            'Accept-Encoding': 'none',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': 'keep-alive'}
        response = req.get(url, headers=header)
        if response.status_code == 200:
            return response.text
        return None

    def get_mooc_list(self, pages=8):
        """Crawl *pages* list pages and persist every course found.

        For each course card on each list page, fetch the detail page,
        and append the scraped fields as one CSV-style line to the log
        file.  Pages or courses that fail to download are skipped.
        """
        for page_no in range(1, pages + 1):
            # Pagination URL format of the course list.
            url = "https://coding.imooc.com/?sort=0&unlearn=0&page={}".format(page_no)
            res = self.download(url)
            if res is None:
                # Failed/blocked page: skip instead of crashing on parse.
                continue
            soup = BeautifulSoup(res, "lxml")
            for card in soup.findAll(class_='course-card'):
                # Detail links are relative; prepend the site root.
                href = self.start_url + card.find('a').get('href')
                m_data = self.get_mooc_detail(href)
                if m_data is not None:
                    # The list mixes str and int values, so stringify
                    # each element before joining into a CSV line.
                    m_info = ','.join([str(x) for x in m_data])
                    print(m_info)
                    self.write_file(m_info)

    def write_file(self, data):
        """Append *data* plus a newline to ``mooc.log`` (UTF-8)."""
        with open('mooc.log', 'a', encoding='utf-8') as fs:
            fs.write(data + '\n')

    def get_mooc_detail(self, url):
        """Scrape one course detail page.

        Returns ``[title, price, level, duration, learner_count, score]``
        on success, or ``None`` when the page cannot be fetched or parsed.
        The score is a random 5-10 placeholder (the real rating is not
        scraped here).
        """
        res = self.download(url)
        if res is None:
            return None
        soup = BeautifulSoup(res, 'lxml')
        try:
            # Commas inside the title would corrupt the CSV line, so
            # replace them with the full-width variant.
            title = soup.find('h1').get_text().strip().replace(',', '，')
            price = soup.find(class_='ori-price').get_text().strip()
            # Level / duration / learner count all share the same CSS
            # class, e.g. "中级", "15小时20分钟", "106".
            ls = [x.get_text() for x in soup.findAll(class_='nodistance')]
            level, timer, nums = ls[0], ls[1], ls[2]
            score = random.randint(5, 10)  # simulated rating
            return [title, price, level, timer, nums, score]
        except Exception as e:
            # Unexpected page layout: report and skip this course.
            # (The original's `try/except/else` left `return None` in a
            # dead `else` branch; make the failure path explicit.)
            print(e)
            return None

if __name__ == '__main__':
    # Script entry point: run a full crawl of the course catalogue.
    Spider().get_mooc_list()

