import code
from urllib.request import urlopen
from bs4 import BeautifulSoup
import sqlite3

# Base URL of the foodmate.net nutrition database; relative page paths are appended.
url_head = 'http://db.foodmate.net/yingyang/'


# Return the list of category directory-page filenames, one per food category.
def get_food_dir_urls(count=21):
    """Return the filenames of the food-category index pages.

    Args:
        count: number of category pages on the site (defaults to the
            21 categories currently published).

    Returns:
        A list like ['type_1.html', ..., f'type_{count}.html'].
    """
    return [f'type_{i}.html' for i in range(1, count + 1)]


# Fetch one category page and return the URLs of the individual food pages it lists.
def get_food_files_urls(init_url):
    """Scrape a category listing page and return its food-page URLs.

    Args:
        init_url: absolute URL of a category listing page.

    Returns:
        A list of relative URL strings, one per <li class="lie"> entry's
        first <a> tag.
    """
    # Context manager closes the HTTP response promptly; the original
    # never closed it, leaking the underlying socket.
    with urlopen(init_url) as response:
        text = response.read()
    soup = BeautifulSoup(text, 'html.parser')

    # Each food entry is an <li class="lie"> whose first <a> links to the
    # food's detail page.
    return [tag.a['href'] for tag in soup.find_all('li', class_='lie')]


# Return the URLs of all food detail pages across every category.
def build_url_list():
    """Collect the relative URLs of every food detail page.

    Walks each category directory page and concatenates the food-page
    URLs found on it.
    """
    pages = []
    for directory in get_food_dir_urls():
        pages.extend(get_food_files_urls(url_head + directory))
    return pages


def build_food_database():
    """Scrape every food page and store its nutrition facts in SQLite.

    Creates (if needed) the `food` table in ../database/food.db and inserts
    one row per food page. Existing rows are kept as-is (INSERT OR IGNORE,
    keyed on NAME). Pages that are malformed or hold non-numeric values
    are skipped rather than aborting the whole run.

    Returns:
        0 on completion (kept for compatibility with existing callers).
    """
    conn = sqlite3.connect('../database/food.db')
    try:
        curs = conn.cursor()

        # IF NOT EXISTS replaces the old `except OperationalError: pass`,
        # which could also have masked unrelated operational errors.
        curs.execute('''
        CREATE TABLE IF NOT EXISTS food (
        NAME TEXT PRIMARY KEY,
        KCAL FLOAT,
        VB1 FLOAT,
        Ca FLOAT,
        PROTEIN FLOAT,
        VB2 FLOAT,
        Mg FLOAT,
        FAT FLOAT,
        VB3 FLOAT,
        Fe FLOAT,
        CARBOHYDRATE FLOAT,
        VC FLOAT,
        Mn FLOAT,
        DF FLOAT,
        VE FLOAT,
        Zn FLOAT,
        VA FLOAT,
        CHOLESTEROL FLOAT,
        Cu FLOAT,
        CAROTENE FLOAT,
        K FLOAT,
        P FLOAT,
        RE FLOAT,
        Na FLOAT,
        Se FLOAT
        )
        ''')

        # 25 placeholders: NAME plus the 24 nutrient columns above.
        query = 'INSERT OR IGNORE INTO food VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'

        for url in build_url_list():
            # Close each HTTP response promptly instead of leaking sockets.
            with urlopen(url_head + url) as response:
                text = response.read()
            soup = BeautifulSoup(text, 'html.parser')

            # The food name lives in the first <font><b>; skip pages that
            # lack it (the old code did the same via AttributeError).
            try:
                name = soup.font.b.string.strip()
            except AttributeError:
                continue
            # This specific page is known to be unusable; skip it.
            if name == '混合油(菜+棕)':
                continue

            print(name)

            values = [name]
            try:
                # Each <div class="list"> holds one nutrient; the second
                # string inside it is the numeric value.
                for data in soup.find_all('div', 'list'):
                    strings = list(data.strings)
                    values.append(float(strings[1]))
            except (ValueError, IndexError):
                # Missing or non-numeric value: skip this food entirely
                # rather than crashing mid-run or inserting a partial row.
                continue

            # Guard against pages with an unexpected number of nutrients,
            # which would otherwise make the INSERT fail on binding count.
            if len(values) != 25:
                continue

            curs.execute(query, values)

        conn.commit()
    finally:
        # Always release the connection, even if scraping raises.
        conn.close()

    return 0

# Build the database only when run as a script, not merely on import.
if __name__ == '__main__':
    build_food_database()
