import yaml

YAML_PATH = "./config.yaml"


# custom dumper so that list items are indented under their parent key
# instead of PyYAML's default indentless block style
class Dumper(yaml.Dumper):
    def increase_indent(self, flow=False, *args, **kwargs):
        return super().increase_indent(flow=flow, indentless=False)

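# Example effect (values are illustrative only): dumping {"detectors": ["a", "b"]}
# with this Dumper produces
#   detectors:
#     - a
#     - b
# rather than PyYAML's default indentless form
#   detectors:
#   - a
#   - b
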
# read scanners from yaml file
# return a list of scanners
def read_scanners(path):
    scanners = []
    with open(path, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
        scanners = config.get("detectors", [])
    return scanners

# convert a list of scanners to yaml file
def write_scanners(scanners):
    with open(YAML_PATH, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
        config["detectors"] = scanners
    with open(YAML_PATH, "w") as f:
        # save scanners to the detectors key in the yaml file
        yaml.dump(config, f, Dumper=Dumper)

# read inference_type from yaml file
def read_inference_type(path):
    inference_type = ""
    with open(path, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
        inference_type = config.get("inference_type", "")
    return inference_type

# write inference_type to yaml file
def write_inference_type(use_inference):
    with open(YAML_PATH, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
        if use_inference:
            config["inference_type"] = 'hf_inference_api'
        else:
            config["inference_type"] = 'hf_pipeline'
    with open(YAML_PATH, "w") as f:
        # save the chosen inference_type back to the yaml file
        yaml.dump(config, f, Dumper=Dumper)

# convert column mapping dataframe to json
def convert_column_mapping_to_json(df, label=""):
    column_mapping = {}
    column_mapping[label] = []
    for _, row in df.iterrows():
        column_mapping[label].append(row.tolist())
    return column_mapping
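

# --- Usage sketch (illustrative only, not part of the original module) ---
# Assumes ./config.yaml already exists and holds `detectors` and
# `inference_type` keys in the shape these helpers expect; the dataframe
# below is a hypothetical column mapping.
if __name__ == "__main__":
    import pandas as pd

    # round-trip the scanner list through the yaml file
    scanners = read_scanners(YAML_PATH)
    write_scanners(scanners)

    # persist the inference backend choice and read it back
    write_inference_type(use_inference=True)
    print(read_inference_type(YAML_PATH))  # -> hf_inference_api

    # convert a column-mapping dataframe to a JSON-serializable dict
    df = pd.DataFrame([["text", "input"], ["label", "target"]])
    print(convert_column_mapping_to_json(df, label="features"))
    # -> {'features': [['text', 'input'], ['label', 'target']]}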