import gradio as gr
import zipfile
import json
import pandas as pd
from collections import Counter

def extract_tm_info(tm_path):
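    """Read basic project info (type, version, training hyperparameters) from manifest.json inside a .tm file (a zip archive)."""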
    with zipfile.ZipFile(tm_path, 'r') as zip_ref:
        with zip_ref.open('manifest.json') as f:
            manifest = json.load(f)
        return {
            'type': manifest.get('type', 'N/A'),
            'version': manifest.get('version', 'N/A'),
            'epochs': manifest.get('appdata', {}).get('trainEpochs', 'N/A'),
            'batch_size': manifest.get('appdata', {}).get('trainBatchSize', 'N/A'),
            'learning_rate': manifest.get('appdata', {}).get('trainLearningRate', 'N/A')
        }
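
# For reference, a minimal sketch of the manifest.json fields read above (illustrative
# values only; only the keys accessed by extract_tm_info are shown):
#
#   {
#     "type": "image",
#     "version": "...",
#     "appdata": {
#       "trainEpochs": 50,
#       "trainBatchSize": 16,
#       "trainLearningRate": 0.001
#     }
#   }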

def extract_zip_info(zip_path):
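    """Inspect a Teachable Machine TensorFlow.js export (.zip): return its metadata.json contents,
    a count of layer types from model.json, and the size of model.weights.bin."""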
    with zipfile.ZipFile(zip_path, 'r') as zip_ref:
        file_list = zip_ref.namelist()
        metadata = model_json = None
        weights_file = None

        for file in file_list:
            if 'metadata.json' in file:
                with zip_ref.open(file) as f:
                    metadata = json.load(f)
            elif 'model.json' in file:
                with zip_ref.open(file) as f:
                    model_json = json.load(f)
            elif 'model.weights.bin' in file:
                weights_file = file

        if model_json:
            # Count layer types directly from the topology stored in model.json.
            model_topology_json = model_json['modelTopology']
            summary = {'layer_counts': Counter()}
            extract_layer_info(model_topology_json['config']['layers'], summary)
            layer_counts_text = ', '.join(f'{k}: {v}' for k, v in summary['layer_counts'].items())
        else:
            layer_counts_text = "Model not found"

        weights_info = {'size_bytes': zip_ref.getinfo(weights_file).file_size} if weights_file else {'size_bytes': 'Not found'}

        return {
            'metadata': metadata if metadata else 'Metadata not found',
            'model_summary': layer_counts_text,
            'weights_info': weights_info
        }


def extract_layer_info(layers, summary):
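    """Recursively tally layer class names into summary['layer_counts'], descending into nested Sequential/Model configs."""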
    for layer in layers:
        class_name = layer['class_name']
        summary['layer_counts'][class_name] += 1
        if class_name in ['Sequential', 'Model']:
            sub_layers = layer['config']['layers']
            extract_layer_info(sub_layers, summary)
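
# A minimal sketch of what extract_layer_info produces, on a hypothetical topology fragment:
#
#   layers = [
#       {'class_name': 'Conv2D', 'config': {}},
#       {'class_name': 'Dense', 'config': {}},
#       {'class_name': 'Dense', 'config': {}},
#   ]
#   summary = {'layer_counts': Counter()}
#   extract_layer_info(layers, summary)
#   # summary['layer_counts'] == Counter({'Dense': 2, 'Conv2D': 1})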

def analyze_files(tm_file, zip_file):
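    """Gradio callback: extract info from whichever upload is provided and render it as an HTML table."""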
    results = {}
    if tm_file is not None:
        tm_info = extract_tm_info(tm_file.name)
        results['tm_info'] = tm_info
    if zip_file is not None:
        zip_info = extract_zip_info(zip_file.name)
        results['zip_info'] = zip_info
    return pd.DataFrame([results]).to_html(escape=False)

iface = gr.Interface(
    fn=analyze_files,
    inputs=[
        gr.File(label="Upload .tm File"),
        gr.File(label="Upload .zip File")
    ],
    outputs=gr.HTML(),
    title="GTM-Scope",
    description="Upload a .tm or .zip file to extract its information. Both files are generated by Google Teachable Machine: the .tm file provides basic model information such as type, version, and training parameters, while the .zip file (with JSON), obtained after training and exporting the model in TensorFlow.js format, offers detailed insights including model structure, weights size, and other metadata."
)
iface.launch(debug=True)