def main():
script_dir = os.path.dirname(os.path.abspath(__file__))
raw_images_root = os.path.join(script_dir, 'icons8_raw')
final_images_root = os.path.join(script_dir, 'icons8')
final_size = (28, 28)
resample_strategy = Image.NEAREST
for category in os.listdir(raw_images_root):
if (not os.path.isdir(os.path.join(raw_images_root, category))):
continue
if (not os.path.exists(os.path.join(final_images_root, 'training', category))):
os.makedirs(os.path.join(final_images_root, 'training', category))
if (not os.path.exists(os.path.join(final_images_root, 'testing', category))):
os.makedirs(os.path.join(final_images_root, 'testing', category))
category_image_filenames = os.listdir(os.path.join(raw_images_root, category))
for (i, filename) in enumerate(category_image_filenames):
split = ('training' if ((i % 2) == 0) else 'testing')
raw_image_path = os.path.join(raw_images_root, category, filename)
final_image_path = os.path.join(final_images_root, split, category, filename)
image = Image.open(raw_image_path)
small_image = image.resize(final_size, resample_strategy)
small_image.save(final_image_path)
|
def get_qualities(f, repeats):
"""
Get map of quality ratings from a CSV file of annotations, skipping repeated files

:param f: path to CSV annotations
:param repeats: collection of repeated filenames to exclude
:return: map of filenames to quality ratings
"""
quality_dict = {}
with open(f, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
filename = row['filename']
if (filename not in repeats):
quality = row['file_attributes']
att_map = json.loads(quality)
quality_dict[filename] = att_map['Quality']
return quality_dict
|
def count_qualities(q1, q2):
"""
Count the total number of ratings per quality class across both annotators

:param q1: first quality map
:param q2: second quality map
:return: list of total counts for each quality class
"""
quality_counts = [0, 0, 0, 0]
for key in q1.keys():
quality = q1[key]
index = QUALITIES[quality]
quality_counts[index] = (quality_counts[index] + 1)
for key in q2.keys():
quality = q2[key]
index = QUALITIES[quality]
quality_counts[index] = (quality_counts[index] + 1)
return quality_counts
|
def calculate_po(q1, q2):
"""
Calculate Po (observed agreement) for Cohen's Kappa

:param q1: first quality map
:param q2: second quality map
:return: tuple of (Po, list of filenames the annotators disagreed on)
"""
total = 0
agreed = 0
mismatched = []
for f in q1.keys():
total += 1
if (q1[f] == q2[f]):
agreed += 1
else:
mismatched.append(f)
return ((float(agreed) / total), mismatched)
|
def get_proportions(q):
'\n Get proportion of each quality score\n\n :param q: quality score map\n :return: map of proportions of each class\n '
total = 0
p_map = {GOOD: 0, POOR: 0, FAIR: 0, UNSATISFACTORY: 0}
for f in q.keys():
p_map[q[f]] = (p_map[q[f]] + 1)
total += 1
for key in p_map.keys():
p_map[key] = (p_map[key] / float(total))
return p_map
|
def calculate_pe(p1, p2):
"\n Calculate Pe for Cohen's Kappa\n\n :param p1: quality proportions for first annotator\n :param p2: quality proportions for second annotator\n :return: Pe\n "
p_poor = (p1[POOR] * p2[POOR])
p_fair = (p1[FAIR] * p2[FAIR])
p_uns = (p1[UNSATISFACTORY] * p2[UNSATISFACTORY])
return ((p_poor + p_fair) + p_uns)
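# Hedged usage sketch (illustrative data, not from the original annotations):
# Cohen's Kappa combines the helpers above as kappa = (Po - Pe) / (1 - Pe),
# where Po comes from calculate_po() and Pe from calculate_pe() applied to the
# per-annotator proportions. FAIR/POOR/UNSATISFACTORY are the quality constants
# assumed to be defined elsewhere in this script.
example_q1 = {'img1.png': FAIR, 'img2.png': POOR, 'img3.png': UNSATISFACTORY}
example_q2 = {'img1.png': FAIR, 'img2.png': UNSATISFACTORY, 'img3.png': UNSATISFACTORY}
po, mismatched = calculate_po(example_q1, example_q2)
pe = calculate_pe(get_proportions(example_q1), get_proportions(example_q2))
kappa = (po - pe) / (1 - pe) if pe != 1 else 1.0
print('Po={:.3f}, Pe={:.3f}, kappa={:.3f}'.format(po, pe, kappa))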
|
def convert_2d_segmentation_nifti_to_img(nifti_file: str, output_filename: str, transform=None, export_dtype=np.uint8):
img = sitk.GetArrayFromImage(sitk.ReadImage(nifti_file))
assert (img.shape[0] == 1), 'This function can only export 2D segmentations!'
img = img[0]
if (transform is not None):
img = transform(img)
output_filename = output_filename.replace('_0000', '')
io.imsave(output_filename, img.astype(export_dtype), check_contrast=False)
|
def create_masks(mask_dict, img_path, mask1_path, mask2_path):
'\n Create and save masks from annotations\n\n :param mask_dict: dictionary of polygon annotations\n :param img_path: path to image file\n :param mask1_path: path for capsule mask\n :param mask2_path: path for region mask\n '
for key in mask_dict:
img = cv2.imread(os.path.join(img_path, key))
(height, width, channel) = img.shape
mask1 = np.zeros((height, width))
mask2 = np.zeros((height, width))
for region in range(1, 5):
if (region in mask_dict[key]):
points = mask_dict[key][region]
if (region == 1):
cv2.fillPoly(mask1, points, color=region)
else:
cv2.fillPoly(mask2, points, color=(region - 1))
cv2.imwrite(os.path.join(mask1_path, key), mask1)
cv2.imwrite(os.path.join(mask2_path, key), mask2)
|
def process_file(annotation_file):
'\n Extract region shape information from annotation file\n\n :param annotation_file: CSV file of annotations\n :return: dictionary of extracted information\n '
filename = ''
mask_dict = {}
with open(annotation_file, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
filename = row['filename']
if (filename not in mask_dict):
mask_dict[filename] = {}
shape = json.loads(row['region_shape_attributes'])
region_dict = json.loads(row['region_attributes'])
if (region_dict != {}):
region = LABEL_MAP[region_dict['Anatomy']]
else:
region = 0
if (region not in mask_dict[filename]):
mask_dict[filename][region] = []
if (shape != {}):
if (shape['name'] == 'rect'):
x_points = [shape['x']]
y_points = [shape['y']]
else:
x_points = shape['all_points_x']
y_points = shape['all_points_y']
all_points = []
for (i, x) in enumerate(x_points):
all_points.append([x, y_points[i]])
mask_dict[filename][region].append(np.array(all_points, 'int32'))
return mask_dict
|
def get_dsc_coef(pair, capsule_path, region_path):
"""
Compute average pairwise DSC scores for the given set of masks

:param pair: set of image filenames to compare
:param capsule_path: path to the capsule masks
:param region_path: path to the region masks
:return: list of average DSC scores for each class
"""
class_coefs = []
capsules = []
regions = []
for i in range(3):
capsules.append(cv2.imread(os.path.join(capsule_path, pair[i])))
regions.append(cv2.imread(os.path.join(region_path, pair[i])))
for i in range(1, (NUM_CLASSES + 1)):
masks = generate_masks(capsules, regions, i, 3)
dsc_total = 0
for pair in [(0, 1), (1, 2), (0, 2)]:
mask1 = masks[pair[0]]
mask2 = masks[pair[1]]
dsc_coef = compute_dsc(mask1, mask2)
dsc_total += dsc_coef
avg_dsc = (dsc_total / 3)
class_coefs.append(avg_dsc)
return class_coefs
|
def get_hd(pair, capsule_path, region_path, conversion):
"""
Compute average pairwise Hausdorff distance for the given set of masks

:param pair: set of image filenames to compare
:param capsule_path: path to the capsule masks
:param region_path: path to the region masks
:param conversion: pixel to mm conversion factor
:return: list of Hausdorff distances for each class
"""
class_coefs = []
capsules = []
regions = []
for i in range(3):
capsules.append(cv2.imread(os.path.join(capsule_path, pair[i])))
regions.append(cv2.imread(os.path.join(region_path, pair[i])))
for i in range(1, (NUM_CLASSES + 1)):
masks = generate_masks(capsules, regions, i, 3)
hd_total = 0
for pair in [(0, 1), (1, 2), (0, 2)]:
mask1 = masks[pair[0]]
mask2 = masks[pair[1]]
hd = compute_hd(mask1, mask2)
hd_total += hd
class_coefs.append(((hd_total / NUM_FILES) * conversion))
return class_coefs
|
def get_point_nums(f):
'\n Get number of points for each image in annotation file\n\n :param f: path to annotation file\n :return: map of image to the number of points per class\n '
point_dict = {}
with open(f, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
filename = row['filename']
region_dict = row['region_shape_attributes']
class_dict = row['region_attributes']
reg_map = json.loads(region_dict)
class_map = json.loads(class_dict)
if class_map:
reg_class = class_map['Anatomy']
if (filename not in point_dict):
point_dict[filename] = {'Capsule': 0, 'Central Echo Complex': 0, 'Medulla': 0, 'Cortex': 0}
if (len(reg_map) > 0):
if ('all_points_x' in reg_map):
num_points = len(reg_map['all_points_x'])
else:
num_points = 4
else:
num_points = 0
point_dict[filename][reg_class] = (point_dict[filename][reg_class] + num_points)
return point_dict
|
def get_percent_change(val1, val2):
if (val1 == 0):
return 0
else:
return abs(((100 * (val2 - val1)) / val1))
|
def get_mask_sensitivity(regions):
coefs = []
for i in range(2, 5):
new_coefs = []
masks = generate_masks(None, regions, i, 2)
new_coefs.append(compute_dsc(masks[0], masks[1]))
kernel = np.ones((3, 3), np.uint8)
og_mask = masks[1].copy().astype('uint8')
masks[1] = cv2.erode(og_mask, kernel, iterations=1)
new_coefs.append(compute_dsc(masks[0], masks[1]))
new_coefs.append(get_percent_change(new_coefs[0], new_coefs[1]))
masks[1] = cv2.erode(og_mask, kernel, iterations=10)
new_coefs.append(compute_dsc(masks[0], masks[1]))
new_coefs.append(get_percent_change(new_coefs[0], new_coefs[3]))
masks[1] = cv2.dilate(og_mask, kernel, iterations=1)
new_coefs.append(compute_dsc(masks[0], masks[1]))
new_coefs.append(get_percent_change(new_coefs[0], new_coefs[5]))
masks[1] = cv2.dilate(og_mask, kernel, iterations=10)
new_coefs.append(compute_dsc(masks[0], masks[1]))
new_coefs.append(get_percent_change(new_coefs[0], new_coefs[7]))
coefs = (coefs + format_floats_for_csv(new_coefs))
return coefs
|
def get_views(f):
'\n Get view counts from given annotation file\n\n :param f: VGG annotations file\n :return: dictionary of views for each file\n '
view_dict = {}
with open(f, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
filename = row['filename']
view = row['file_attributes']
att_map = json.loads(view)
view_dict[filename] = att_map['View']
return view_dict
|
def restrict_segmentations(mask_path, output_path):
cap_img_path = os.path.join(mask_path, 'capsule')
reg_img_path = os.path.join(mask_path, 'regions')
maybe_mkdir(os.path.join(output_path, 'capsule'))
maybe_mkdir(os.path.join(output_path, 'regions'))
for f in os.listdir(cap_img_path):
cap_img = cv2.imread(os.path.join(cap_img_path, f))
reg_img = cv2.imread(os.path.join(reg_img_path, f))
cap_img = cv2.cvtColor(cap_img, cv2.COLOR_BGR2GRAY)
reg_img = cv2.cvtColor(reg_img, cv2.COLOR_BGR2GRAY)
reg_img = (reg_img * cap_img)
cv2.imwrite(os.path.join(output_path, 'capsule', f), cap_img)
cv2.imwrite(os.path.join(output_path, 'regions', f), reg_img)
|
def clean_segmentations(mask_path, output_path):
cap_img_path = os.path.join(mask_path, 'capsule')
reg_img_path = os.path.join(mask_path, 'regions')
maybe_mkdir(os.path.join(output_path, 'capsule'))
maybe_mkdir(os.path.join(output_path, 'regions'))
for f in os.listdir(cap_img_path):
cap_img = cv2.imread(os.path.join(cap_img_path, f))
reg_img = cv2.imread(os.path.join(reg_img_path, f))
cap_img = cv2.cvtColor(cap_img, cv2.COLOR_BGR2GRAY)
(contours, hierarchy) = cv2.findContours(cap_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
new_cap_img = np.zeros(cap_img.shape)
if contour_sizes:
biggest_contour = max(contour_sizes, key=(lambda x: x[0]))[1]
cv2.fillPoly(new_cap_img, pts=[biggest_contour], color=1)
reg_img = reg_img
cv2.imwrite(os.path.join(output_path, 'capsule', f), new_cap_img)
cv2.imwrite(os.path.join(output_path, 'regions', f), reg_img)
|
def str_to_bool(value):
if isinstance(value, bool):
return value
if (value.lower() in {'false', 'f', '0', 'no', 'n'}):
return False
elif (value.lower() in {'true', 't', '1', 'yes', 'y'}):
return True
raise ValueError(f'{value} is not a valid boolean value')
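# Hedged usage sketch (an assumption, not from the original script): str_to_bool
# is typically wired into argparse as a `type=` converter so that values like
# "yes", "n", or "false" on the command line parse into real booleans.
import argparse

example_parser = argparse.ArgumentParser()
example_parser.add_argument('--grayscale', type=str_to_bool, default=False)
example_args = example_parser.parse_args(['--grayscale', 'yes'])
print(example_args.grayscale)  # True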
|
def maybe_mkdir(directory):
if (not os.path.exists(directory)):
os.mkdir(directory)
|
def number_image(filename, name):
if (name not in patient_map):
patient_map[name] = len(patient_map)
return ((str(patient_map[name]) + '_') + filename)
|
def mask_and_convert_to_png(dcm_path, args, filename):
'\n Masks and saves dicom at given path to new anonymized png file\n\n :param dcm_path: path to DICOM file to process\n :param args: script arguments\n :param filename: filename to save to\n '
dicom = None
filenames = []
included_path = os.path.join(args.savepath, 'included')
excluded_path = os.path.join(args.savepath, 'excluded')
try:
dicom = Dicom(dcm_path)
except Exception as e:
with open(args.errorfiles, 'a+') as fp:
message = (((dcm_path + ': ') + str(e)) + '\n')
fp.write(message)
print(message)
return (None, filenames)
if (not dicom.check_contains_pixel_data()):
return (None, filenames)
dicom.convert_colourspace()
dicom.remove_patient_info()
metadata = dicom.metadata()
filename = number_image(filename, metadata['patient_name'])
outpath = os.path.join(included_path, filename)
try:
masked = dicom.video
except (ValueError, RuntimeError, AttributeError, IndexError) as ve:
with open(args.errorfiles, 'a+') as fp:
print(args.errorfiles)
message = (((dcm_path + ': ') + str(ve)) + '\n')
fp.write(message)
print(message)
masked = dicom.video
outpath = os.path.join(excluded_path, filename)
if args.grayscale:
masked = to_grayscale(dicom.video)
print('Fail')
if dicom.is_video:
for (i, frame) in enumerate(masked):
if (not args.grayscale):
mask = cv2.cvtColor(frame.astype('uint8'), cv2.COLOR_RGB2BGR)
else:
mask = frame
outfile = outpath.replace('.dcm', '_frame{}_anon.png').format(i)
# write each anonymized frame to disk
cv2.imwrite(outfile, mask)
filenames.append(os.path.basename(outfile))
else:
if (not args.grayscale):
masked = cv2.cvtColor(masked.astype('uint8'), cv2.COLOR_RGB2BGR)
outfile = outpath.replace('.dcm', '_anon.png')
# write the anonymized image to disk
cv2.imwrite(outfile, masked)
filenames.append(os.path.basename(outfile))
return (metadata, filenames)
|
def mask_and_save_to_dicom(dcm_path, args, filename):
'\n Masks and saves dicom at given path to new anonymized DICOM file\n\n :param dcm_path: path to DICOM file to process\n :param args: script arguments\n :param filename: filename to save to\n '
dicom = Dicom(dcm_path)
metadata = dicom.metadata()
included_path = os.path.join(args.savepath, 'included')
excluded_path = os.path.join(args.savepath, 'excluded')
filename = number_image(filename, metadata['patient_name'])
outpath = os.path.join(included_path, filename)
try:
dicom.mask_pixel_array(crop=args.crop, resized=False, grayscale=args.grayscale, exclude_doppler=args.exclude_doppler)
except (ValueError, RuntimeError, AttributeError, IndexError) as ve:
with open(args.errorfiles, 'a+') as fp:
fp.write((((dcm_path + ': ') + str(ve)) + '\n'))
outpath = os.path.join(excluded_path, filename)
dicom.anonymize()
dicom.save(outpath)
return (metadata, [os.path.basename(outpath)])
|
def mask_and_save_to_nii(dcm_path, args, filename):
"""
Masks and saves DICOM at given path to a new anonymized NIfTI file

:param dcm_path: path to DICOM file to process
:param args: script arguments
:param filename: filename to save to
"""
dicom = None
filenames = []
included_path = os.path.join(args.savepath, 'included')
excluded_path = os.path.join(args.savepath, 'excluded')
dicom = Dicom(dcm_path)
dicom.convert_colourspace()
dicom.remove_patient_info()
metadata = dicom.metadata()
outpath = os.path.join(included_path, filename)
if dicom.is_video:
print(filename)
if ((args.length == 'video') and (not dicom.is_video)):
return (None, filenames)
if ((args.length == 'img') and dicom.is_video):
return (None, filenames)
if (not dicom.check_contains_pixel_data()):
return (None, filenames)
try:
masked = dicom.masked_video(crop=args.crop, resized=False, grayscale=args.grayscale, exclude_doppler=args.exclude_doppler)
except (ValueError, RuntimeError, AttributeError, IndexError, OSError) as ve:
with open(args.errorfiles, 'a+') as fp:
message = (((dcm_path + ': ') + str(ve)) + '\n')
fp.write(message)
print(message)
masked = dicom.video
outpath = os.path.join(excluded_path, filename)
if args.grayscale:
masked = to_grayscale(dicom.video)
if (not args.grayscale):
masked = cv2.cvtColor(masked.astype('uint8'), cv2.COLOR_RGB2BGR)
outfile = outpath.replace('.dcm', '.nii.gz')
itk_img = sitk.GetImageFromArray(masked)
filenames.append(os.path.basename(outfile))
sitk.WriteImage(itk_img, outfile)
# return the names of the files actually written (.nii.gz)
return (metadata, filenames)
|
def maybe_mkdir(dirname):
if (not os.path.exists(dirname)):
os.makedirs(dirname)
|
def write_rows_to_file(csv_file, rows):
'\n Write given rows of data to CSV file\n\n :param csv_file: path to output CSV files\n :param rows: rows of data to write to file\n '
with open(csv_file, 'w', newline='') as fp:
csv_writer = csv.writer(fp)
for row in rows:
csv_writer.writerow(row)
|
def format_floats_for_csv(l):
new_l = []
for num in l:
truncated_num = float(('%.2f' % num))
new_l.append(truncated_num)
return new_l
|
def estimate_nakagami(arr):
arr = arr.astype(np.int64)
N = arr.size
arr2 = np.square(arr)
arr4 = np.square(arr2)
e_x2 = (np.sum(arr2) / N)
e_x4 = (np.sum(arr4) / N)
nak_scale = e_x2
if ((e_x4 - (e_x2 ** 2)) == 0):
nak_shape = 0
else:
nak_shape = ((e_x2 ** 2) / (e_x4 - (e_x2 ** 2)))
return np.nan_to_num([nak_shape, nak_scale])
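# Hedged sanity-check sketch (not part of the original pipeline): draw samples
# from a known Nakagami distribution with scipy and confirm that the
# moment-based estimator above roughly recovers the shape parameter and the
# scale (here the scale is E[x^2], i.e. the Nakagami spread parameter omega).
# estimate_nakagami casts the pixel values to integers, so a large scale keeps
# the truncation error small.
import numpy as np
from scipy.stats import nakagami

true_shape = 2.0
true_omega = 100.0 ** 2  # E[x^2]; scipy's scale parameter is sqrt(omega)
samples = nakagami.rvs(true_shape, scale=np.sqrt(true_omega), size=50000, random_state=0)
est_shape, est_scale = estimate_nakagami(samples)
print('shape: true={:.2f} est={:.2f}'.format(true_shape, est_shape))
print('scale (E[x^2]): true={:.0f} est={:.0f}'.format(true_omega, est_scale))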
|
def compute_nak_for_mask(img, mask, num_classes):
all_nak_params = []
for i in range(1, (num_classes + 1)):
pixels = img[np.where((mask == i))]
nak_params = estimate_nakagami(pixels)
all_nak_params.append(nak_params)
return all_nak_params
|
def compute_snr_for_mask(img, mask, num_classes):
all_snr = []
for i in range(1, (num_classes + 1)):
pixels = img[np.where((mask == i))]
if (pixels.size > 0):
mean = np.mean(pixels)
std = np.std(pixels)
snr = np.log10((mean / std))
else:
snr = 0
all_snr.append(snr)
return all_snr
|
def kl_divergence(p, q):
'\n Taken from https://towardsdatascience.com/kl-divergence-python-example-b87069e4b810\n '
return np.sum(np.where((p != 0), (p * np.log((p / q))), 0))
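# Hedged usage sketch: the discrete KL divergence above expects two probability
# vectors over the same support, and q must be non-zero wherever p is non-zero.
import numpy as np

p_example = np.array([0.6, 0.3, 0.1])
q_example = np.array([0.5, 0.4, 0.1])
print(kl_divergence(p_example, q_example))  # small positive number
print(kl_divergence(p_example, p_example))  # 0.0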
|
def compute_nakagami_kl_divergence(params1, params2):
lim = (max(params1[1], params2[1]) * 4)
x = np.arange(0.01, lim, 0.01)
p = nakagami.pdf(x, params1[0], loc=0, scale=params1[1])
q = nakagami.pdf(x, params2[0], loc=0, scale=params2[1])
if ((params1[0] == 0) and (params1[1] == 0) and (params2[0] == 0) and (params2[1] == 0)):
return 0
kl = entropy(p, qk=q)
if math.isnan(kl):
kl = (- 1)
return kl
|
def get_dsc_coef(capsule1_path, region1_path, capsule2_path, region2_path):
'\n Compute DSC coefficient for given masks\n\n :param capsule1_path: path to capsule mask for first segmentation\n :param region1_path: path to region mask for first segmentation\n :param capsule2_path: path to capsule mask for second segmentation\n :param region2_path: path to region mask for second segmentation\n :return: list of DSC scores for each class\n '
class_coefs = []
capsules = []
regions = []
capsules.append(cv2.imread(capsule1_path))
capsules.append(cv2.imread(capsule2_path))
regions.append(cv2.imread(region1_path))
regions.append(cv2.imread(region2_path))
for i in range(1, 5):
masks = generate_masks(capsules, regions, i, 2)
dsc_coef = compute_dsc(masks[0], masks[1])
class_coefs.append(dsc_coef)
return class_coefs
|
def generate_score_csv(path1, path2, outpath, score_func=get_dsc_coef):
"""
Compute scores (DSC by default) for mask pairs at the given paths and save to a CSV file

:param path1: path to first set of masks
:param path2: path to second set of masks
:param outpath: path to output score CSV file
:param score_func: scoring function applied to each mask pair
"""
with open(outpath, 'w', newline='') as fp:
writer = csv.writer(fp)
writer.writerow(['file', 'Capsule', 'Central Echo Complex', 'Medulla', 'Cortex', 'mean'])
total = 0
avg = np.array([0, 0, 0, 0])
for filename in os.listdir(os.path.join(path1, 'capsule')):
capsule1_path = os.path.join(path1, 'capsule', filename)
region1_path = os.path.join(path1, 'regions', filename)
capsule2_path = os.path.join(path2, 'capsule', filename)
region2_path = os.path.join(path2, 'regions', filename)
coefs = score_func(capsule1_path, region1_path, capsule2_path, region2_path)
mean = np.array(coefs).mean()
row = (([filename] + coefs) + [mean])
writer.writerow(row)
avg = (avg + np.array(coefs))
total += 1
avg = (avg / total)
mean = (avg.sum() / 4)
writer.writerow(['mean', avg[0], avg[1], avg[2], avg[3], mean])
|
def get_precision(capsule1_path, region1_path, capsule2_path, region2_path):
'\n Compute precision for given masks\n\n :param capsule1_path: path to capsule mask for first segmentation\n :param region1_path: path to region mask for first segmentation\n :param capsule2_path: path to capsule mask for second segmentation\n :param region2_path: path to region mask for second segmentation\n :return: list of precisions for each class\n '
class_coefs = []
capsules = []
regions = []
capsules.append(cv2.imread(capsule1_path))
capsules.append(cv2.imread(capsule2_path))
regions.append(cv2.imread(region1_path))
regions.append(cv2.imread(region2_path))
for i in range(1, 5):
masks = generate_masks(capsules, regions, i, 2)
precision_coef = compute_precision(masks[0], masks[1])
class_coefs.append(precision_coef)
return class_coefs
|
def get_recall(capsule1_path, region1_path, capsule2_path, region2_path):
'\n Compute recall for given masks\n\n :param capsule1_path: path to capsule mask for first segmentation\n :param region1_path: path to region mask for first segmentation\n :param capsule2_path: path to capsule mask for second segmentation\n :param region2_path: path to region mask for second segmentation\n :return: list of recall for each class\n '
class_coefs = []
capsules = []
regions = []
capsules.append(cv2.imread(capsule1_path))
capsules.append(cv2.imread(capsule2_path))
regions.append(cv2.imread(region1_path))
regions.append(cv2.imread(region2_path))
for i in range(1, 5):
masks = generate_masks(capsules, regions, i, 2)
recall_coef = compute_recall(masks[0], masks[1])
class_coefs.append(recall_coef)
return class_coefs
|
def get_hd(capsule1_path, region1_path, capsule2_path, region2_path, conversion):
'\n Compute Hausdorff distance for given masks\n\n :param capsule1_path: path to capsule mask for first segmentation\n :param region1_path: path to region mask for first segmentation\n :param capsule2_path: path to capsule mask for second segmentation\n :param region2_path: path to region mask for second segmentation\n :param conversion: pixel to mm conversions\n :return: list of Hausdorff distances for each class\n '
class_coefs = []
capsules = []
regions = []
capsules.append(cv2.imread(capsule1_path))
capsules.append(cv2.imread(capsule2_path))
regions.append(cv2.imread(region1_path))
regions.append(cv2.imread(region2_path))
for i in range(1, (4 + 1)):
masks = generate_masks(capsules, regions, i, 2)
hd = compute_hd(masks[0], masks[1])
class_coefs.append((hd * conversion))
return class_coefs
|
def generate_hd_csv(path1, path2, converter, outpath):
"""
Compute Hausdorff distances for mask pairs at given paths and save to CSV file

:param path1: path to first set of masks
:param path2: path to second set of masks
:param converter: map of pixel to mm conversions
:param outpath: path to Hausdorff distance CSV file
"""
with open(outpath, 'w', newline='') as fp:
writer = csv.writer(fp)
writer.writerow(['file', 'Capsule', 'Central Echo Complex', 'Medulla', 'Cortex', 'mean'])
total = 0
avg = np.array([0, 0, 0, 0])
for filename in os.listdir(os.path.join(path1, 'capsule')):
capsule1_path = os.path.join(path1, 'capsule', filename)
region1_path = os.path.join(path1, 'regions', filename)
capsule2_path = os.path.join(path2, 'capsule', filename)
region2_path = os.path.join(path2, 'regions', filename)
conversion = float(converter[filename])
coefs = get_hd(capsule1_path, region1_path, capsule2_path, region2_path, conversion)
mean = np.array(coefs).mean()
row = (([filename] + coefs) + [mean])
writer.writerow(row)
avg = (avg + np.array(coefs))
total += 1
avg = (avg / total)
mean = (avg.sum() / 4)
writer.writerow(['mean', avg[0], avg[1], avg[2], avg[3], mean])
|
def get_conversions(conversion_file):
'\n Build dictionary containing pixel to mm conversions\n\n :param conversion_file: CSV file containing pixel to mm conversions\n :return: dictionary of pixel to mm conversions\n '
conversions = {}
with open(conversion_file, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
filename = row['Filename']
factor = row['mm/pixel']
conversions[filename] = factor
return conversions
|
def get_repeat_sets(csv_file):
'\n Get sets of repeated files for intrarater variability\n\n :param csv_file: CSV file with sets of repeated files\n :return: list of lists of repeated filesets\n '
files = []
with open(csv_file, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
fileset = []
for i in range(1, 4):
fileset.append(row[('file' + str(i))])
files.append(fileset)
return files
|
def get_repeats(csv_file):
'\n Get list of repeated files\n\n :param csv_file: CSV file with sets of repeated files\n :return: list of repeated files to exclude\n '
files = []
with open(csv_file, 'r') as csv_fp:
csv_reader = csv.DictReader(csv_fp)
for row in csv_reader:
files.append(row['file2'])
files.append(row['file3'])
return files
|
def generate_masks(capsules, regions, cls, num):
'\n Get separated masks for each class from segmentation file\n\n :param capsules: capsule segmentation\n :param regions: regions segmentation\n :param cls: class to extract\n :param num: number of segmentations in list\n :return: masks for specified class\n '
masks = []
if (cls == 1):
for i in range(num):
masks.append((capsules[i] == cls))
else:
for i in range(num):
masks.append((regions[i] == (cls - 1)))
return masks
|
def compute_dsc(mask1, mask2):
'\n Compute DSC score for given masks\n\n :param mask1: first mask\n :param mask2: second mask\n :return: DSC score\n '
intersection = np.logical_and(mask1, mask2)
if ((mask1.sum() + mask2.sum()) != 0):
dsc_coef = ((2 * intersection.sum()) / (mask1.sum() + mask2.sum()))
else:
dsc_coef = 1
return dsc_coef
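# Hedged usage sketch with two toy boolean masks:
# DSC = 2 * (overlap pixels) / (pixels in A + pixels in B).
import numpy as np

mask_a = np.zeros((4, 4), dtype=bool)
mask_b = np.zeros((4, 4), dtype=bool)
mask_a[1:3, 1:3] = True   # 4 pixels
mask_b[1:3, 1:4] = True   # 6 pixels, 4 of them overlapping
print(compute_dsc(mask_a, mask_b))  # 2*4 / (4+6) = 0.8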
|
def compute_precision(pred, truth):
"""
Compute precision for given masks

:param pred: predicted mask
:param truth: ground truth mask
:return: precision
"""
tp = np.logical_and(truth, pred).sum()
fp = np.logical_and(np.logical_not(truth), pred).sum()
if (tp.sum() == 0):
precision = 0.0
elif ((truth.sum() + pred.sum()) != 0):
precision = (tp / (tp + fp))
else:
precision = 1
return precision
|
def compute_recall(pred, truth):
"""
Compute recall for given masks

:param pred: predicted mask
:param truth: ground truth mask
:return: recall
"""
tp = np.logical_and(truth, pred).sum()
fn = np.logical_and(truth, np.logical_not(pred)).sum()
if (tp.sum() == 0):
recall = 0.0
elif ((truth.sum() + pred.sum()) != 0):
recall = (tp / (tp + fn))
else:
recall = 1
return recall
|
def compute_hd(mask1, mask2):
'\n Compute Hausdorff distance for given masks\n\n :param mask1: first mask\n :param mask2: second mask\n :return: Hausdorff distance\n '
if ((mask1.sum() > 0) and (mask2.sum() > 0)):
hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()
img1 = sitk.GetImageFromArray(mask1.astype(int))
img2 = sitk.GetImageFromArray(mask2.astype(int))
hausdorff_distance_filter.Execute(img1, img2)
hd = hausdorff_distance_filter.GetHausdorffDistance()
else:
hd = 0
return hd
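# Hedged usage sketch: identical masks give a Hausdorff distance of 0, and
# shifting the object by one pixel gives a distance of 1 (in pixel units; the
# callers above multiply by a pixel-to-mm conversion factor).
import numpy as np

mask_a = np.zeros((10, 10), dtype=bool)
mask_b = np.zeros((10, 10), dtype=bool)
mask_a[2:5, 2:5] = True
mask_b[3:6, 2:5] = True  # same square shifted down by one pixel
print(compute_hd(mask_a, mask_a))  # 0.0
print(compute_hd(mask_a, mask_b))  # 1.0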
|
class Wrapper():
def __init__(self, d):
self.d = d
def __getattr__(self, x):
return self.d[x]
|
class HereBeDragons():
d = {}
FLAGS = Wrapper(d)
def __getattr__(self, x):
return self.do_define
def do_define(self, k, v, *x):
self.d[k] = v
|
def db(audio):
if (len(audio.shape) > 1):
maxx = np.max(np.abs(audio), axis=1)
return ((20 * np.log10(maxx)) if np.any((maxx != 0)) else np.array([0]))
maxx = np.max(np.abs(audio))
return ((20 * np.log10(maxx)) if (maxx != 0) else np.array([0]))
|
def load_wav(input_wav_file):
(fs, audio) = wav.read(input_wav_file)
assert (fs == 16000)
print('source dB', db(audio))
return audio
|
def save_wav(audio, output_wav_file):
wav.write(output_wav_file, 16000, np.array(np.clip(np.round(audio), (- (2 ** 15)), ((2 ** 15) - 1)), dtype=np.int16))
print('output dB', db(audio))
|
def levenshteinDistance(s1, s2):
if (len(s1) > len(s2)):
(s1, s2) = (s2, s1)
distances = range((len(s1) + 1))
for (i2, c2) in enumerate(s2):
distances_ = [(i2 + 1)]
for (i1, c1) in enumerate(s1):
if (c1 == c2):
distances_.append(distances[i1])
else:
distances_.append((1 + min((distances[i1], distances[(i1 + 1)], distances_[(- 1)]))))
distances = distances_
return distances[(- 1)]
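# Hedged usage sketch: edit distance between a decoded transcript and the target phrase.
print(levenshteinDistance('kitten', 'sitting'))            # 3
print(levenshteinDistance('open the door', 'open the door'))  # 0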
|
def highpass_filter(data, cutoff=7000, fs=16000, order=10):
(b, a) = butter(order, (cutoff / (0.5 * fs)), btype='high', analog=False)
return lfilter(b, a, data)
|
def get_new_pop(elite_pop, elite_pop_scores, pop_size):
scores_logits = np.exp((elite_pop_scores - elite_pop_scores.max()))
elite_pop_probs = (scores_logits / scores_logits.sum())
cand1 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
cand2 = elite_pop[np.random.choice(len(elite_pop), p=elite_pop_probs, size=pop_size)]
mask = (np.random.rand(pop_size, elite_pop.shape[1]) < 0.5)
next_pop = ((mask * cand1) + ((1 - mask) * cand2))
return next_pop
|
def mutate_pop(pop, mutation_p, noise_stdev, elite_pop):
noise = (np.random.randn(*pop.shape) * noise_stdev)
noise = highpass_filter(noise)
mask = (np.random.rand(pop.shape[0], elite_pop.shape[1]) < mutation_p)
new_pop = (pop + (noise * mask))
return new_pop
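# Hedged sketch of one genetic-algorithm generation (illustrative shapes and
# numbers only): breed a new population from the elite by crossover weighted
# with softmax-normalised scores (get_new_pop above), then add high-pass
# filtered noise to a random subset of samples (mutate_pop). Real fitness
# scores come from the CTC loss inside the Genetic class below.
import numpy as np

toy_elite = np.random.randn(10, 16000).astype(np.float32)   # 10 elite 1-second waveforms
toy_scores = np.random.randn(10)                             # their fitness scores
toy_next = get_new_pop(toy_elite, toy_scores, pop_size=100)  # crossover
toy_next = mutate_pop(toy_next, mutation_p=0.005, noise_stdev=40, elite_pop=toy_elite)
print(toy_next.shape)  # (100, 16000)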
|
class Genetic():
def __init__(self, input_wave_file, output_wave_file, target_phrase):
self.pop_size = 100
self.elite_size = 10
self.mutation_p = 0.005
self.noise_stdev = 40
self.noise_threshold = 1
self.mu = 0.9
self.alpha = 0.001
self.max_iters = 3000
self.num_points_estimate = 100
self.delta_for_gradient = 100
self.delta_for_perturbation = 1000.0
self.input_audio = load_wav(input_wave_file).astype(np.float32)
self.pop = np.expand_dims(self.input_audio, axis=0)
self.pop = np.tile(self.pop, (self.pop_size, 1))
self.output_wave_file = output_wave_file
self.target_phrase = target_phrase
self.funcs = self.setup_graph(self.pop, np.array([toks.index(x) for x in target_phrase]))
def setup_graph(self, input_audio_batch, target_phrase):
batch_size = input_audio_batch.shape[0]
weird = ((input_audio_batch.shape[1] - 1) // 320)
logits_arg2 = np.tile(weird, batch_size)
dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
pass_in = np.clip(input_audio_batch, (- (2 ** 15)), ((2 ** 15) - 1))
seq_len = np.tile(weird, batch_size).astype(np.int32)
with tf.variable_scope('', reuse=tf.AUTO_REUSE):
inputs = tf.placeholder(tf.float32, shape=pass_in.shape, name='a')
len_batch = tf.placeholder(tf.float32, name='b')
arg2_logits = tf.placeholder(tf.int32, shape=logits_arg2.shape, name='c')
arg1_dense = tf.placeholder(tf.float32, shape=dense_arg1.shape, name='d')
arg2_dense = tf.placeholder(tf.int32, shape=dense_arg2.shape, name='e')
len_seq = tf.placeholder(tf.int32, shape=seq_len.shape, name='f')
logits = get_logits(inputs, arg2_logits)
target = ctc_label_dense_to_sparse(arg1_dense, arg2_dense, len_batch)
ctcloss = tf.nn.ctc_loss(labels=tf.cast(target, tf.int32), inputs=logits, sequence_length=len_seq)
(decoded, _) = tf.nn.ctc_greedy_decoder(logits, arg2_logits, merge_repeated=True)
sess = tf.Session()
saver = tf.train.Saver(tf.global_variables())
saver.restore(sess, 'models/session_dump')
func1 = (lambda a, b, c, d, e, f: sess.run(ctcloss, feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f}))
func2 = (lambda a, b, c, d, e, f: sess.run([ctcloss, decoded], feed_dict={inputs: a, len_batch: b, arg2_logits: c, arg1_dense: d, arg2_dense: e, len_seq: f}))
return (func1, func2)
def getctcloss(self, input_audio_batch, target_phrase, decode=False):
batch_size = input_audio_batch.shape[0]
weird = ((input_audio_batch.shape[1] - 1) // 320)
logits_arg2 = np.tile(weird, batch_size)
dense_arg1 = np.array(np.tile(target_phrase, (batch_size, 1)), dtype=np.int32)
dense_arg2 = np.array(np.tile(target_phrase.shape[0], batch_size), dtype=np.int32)
pass_in = np.clip(input_audio_batch, (- (2 ** 15)), ((2 ** 15) - 1))
seq_len = np.tile(weird, batch_size).astype(np.int32)
if decode:
return self.funcs[1](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
else:
return self.funcs[0](pass_in, batch_size, logits_arg2, dense_arg1, dense_arg2, seq_len)
def get_fitness_score(self, input_audio_batch, target_phrase, input_audio, classify=False):
target_enc = np.array([toks.index(x) for x in target_phrase])
if classify:
(ctcloss, decoded) = self.getctcloss(input_audio_batch, target_enc, decode=True)
all_text = ''.join([toks[x] for x in decoded[0].values])
index = (len(all_text) // input_audio_batch.shape[0])
final_text = all_text[:index]
else:
ctcloss = self.getctcloss(input_audio_batch, target_enc)
score = (- ctcloss)
if classify:
return (score, final_text)
return (score, (- ctcloss))
def run(self, log=None):
max_fitness_score = float('-inf')
dist = float('inf')
best_text = ''
itr = 1
prev_loss = None
if (log is not None):
log.write((('target phrase: ' + self.target_phrase) + '\n'))
log.write('itr, corr, lev dist \n')
while ((itr <= self.max_iters) and (best_text != self.target_phrase)):
(pop_scores, ctc) = self.get_fitness_score(self.pop, self.target_phrase, self.input_audio)
elite_ind = np.argsort(pop_scores)[(- self.elite_size):]
(elite_pop, elite_pop_scores, elite_ctc) = (self.pop[elite_ind], pop_scores[elite_ind], ctc[elite_ind])
if ((prev_loss is not None) and (prev_loss != elite_ctc[(- 1)])):
self.mutation_p = ((self.mu * self.mutation_p) + (self.alpha / np.abs((prev_loss - elite_ctc[(- 1)]))))
if ((itr % 10) == 0):
print('**************************** ITERATION {} ****************************'.format(itr))
print('Current loss: {}'.format((- elite_ctc[(- 1)])))
save_wav(elite_pop[(- 1)], self.output_wave_file)
best_pop = np.tile(np.expand_dims(elite_pop[(- 1)], axis=0), (100, 1))
(_, best_text) = self.get_fitness_score(best_pop, self.target_phrase, self.input_audio, classify=True)
dist = levenshteinDistance(best_text, self.target_phrase)
corr = '{0:.4f}'.format(np.corrcoef([self.input_audio, elite_pop[(- 1)]])[0][1])
print('Audio similarity to input: {}'.format(corr))
print('Edit distance to target: {}'.format(dist))
print('Currently decoded as: {}'.format(best_text))
if (log is not None):
log.write((((((str(itr) + ', ') + corr) + ', ') + str(dist)) + '\n'))
if (dist > 2):
next_pop = get_new_pop(elite_pop, elite_pop_scores, self.pop_size)
self.pop = mutate_pop(next_pop, self.mutation_p, self.noise_stdev, elite_pop)
prev_loss = elite_ctc[(- 1)]
else:
perturbed = np.tile(np.expand_dims(elite_pop[(- 1)], axis=0), (self.num_points_estimate, 1))
indices = np.random.choice(self.pop.shape[1], size=self.num_points_estimate, replace=False)
perturbed[(np.arange(self.num_points_estimate), indices)] += self.delta_for_gradient
perturbed_scores = self.get_fitness_score(perturbed, self.target_phrase, self.input_audio)[0]
grad = ((perturbed_scores - elite_ctc[(- 1)]) / self.delta_for_gradient)
grad /= np.abs(grad).max()
modified = elite_pop[(- 1)].copy()
modified[indices] += (grad * self.delta_for_perturbation)
self.pop = np.tile(np.expand_dims(modified, axis=0), (self.pop_size, 1))
self.delta_for_perturbation *= 0.995
itr += 1
return (itr < self.max_iters)
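# Hedged usage sketch (assumed file names; the attack expects a 16 kHz mono WAV,
# the checkpoint restored in setup_graph ('models/session_dump'), and a global
# `toks` character alphabet to be available):
#
#   g = Genetic('sample_input.wav', 'adv_output.wav', 'open the door')
#   with open('attack_log.csv', 'w') as log:
#       success = g.run(log=log)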
|
def buildOrigCDFs(f, g):
global F
global G
global n
global m
F = np.sort(f)
n = len(F)
G = np.sort(g)
m = len(G)
|
def buildNewCDFs(f, g):
global Fb
global Gb
Fb = np.sort(f)
Gb = np.sort(g)
|
def invG(p):
index = int(np.ceil((p * m)))
if (index >= m):
return G[(m - 1)]
elif (index == 0):
return G[0]
return G[(index - 1)]
|
def invF(p):
index = int(np.ceil((p * n)))
if (index >= n):
return F[(n - 1)]
elif (index == 0):
return F[0]
return F[(index - 1)]
|
def invGnew(p, M):
index = int(np.ceil((p * M)))
if (index >= M):
return Gb[(M - 1)]
elif (index == 0):
return Gb[0]
return Gb[(index - 1)]
|
def invFnew(p, N):
index = int(np.ceil((p * N)))
if (index >= N):
return Fb[(N - 1)]
elif (index == 0):
return Fb[0]
return Fb[(index - 1)]
|
def epsilon(dp):
s = 0.0
se = 0.0
for p in np.arange(0, 1, dp):
temp = (invG(p) - invF(p))
tempe = max(temp, 0)
s = (s + ((temp * temp) * dp))
se = (se + ((tempe * tempe) * dp))
if (s != 0):
return (se / s)
else:
print('The denominator is 0')
return 0.0
|
def epsilonNew(dp, N, M):
denom = 0.0
numer = 0.0
for p in np.arange(0, 1, dp):
diff = (invGnew(p, M) - invFnew(p, N))
posdiff = max(diff, 0)
denom += ((diff * diff) * dp)
numer += ((posdiff * posdiff) * dp)
if (denom != 0.0):
return (numer / denom)
else:
print('The denominator is 0')
return 0.0
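# Hedged usage sketch: epsilon() compares the two empirical score CDFs built by
# buildOrigCDFs(); values near 0 mean algorithm A is (almost) stochastically
# greater than algorithm B, values near 1 mean the opposite. The toy scores
# below are illustrative stand-ins for per-run evaluation results.
import numpy as np

rng = np.random.RandomState(0)
scores_A = rng.normal(0.75, 0.02, size=50)
scores_B = rng.normal(0.70, 0.02, size=50)
buildOrigCDFs(scores_A, scores_B)
print(epsilon(0.005))  # close to 0 when A's scores dominate B's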
|
def COS(data_A, data_B):
print('AVG ', np.average(data_A), np.average(data_B))
print('STD ', np.std(data_A), np.std(data_B))
print('MEDIAN ', np.median(data_A), np.median(data_B))
print('MIN ', np.min(data_A), np.min(data_B))
print('MAX ', np.max(data_A), np.max(data_B))
|
def MannWhitney(data_A, data_B):
if ((n < 20) or (m < 20)):
print('Use only when the number of observations in each sample is > 20')
return 1.0
(_, pval) = Utest(data_A, data_B, alternative='less')
return pval
|
def main():
if (len(sys.argv) < 4):
print('Not enough arguments\n')
sys.exit()
filename_A = sys.argv[1]
filename_B = sys.argv[2]
alpha = float(sys.argv[3])
with open(filename_A) as f:
data_A = f.read().splitlines()
with open(filename_B) as f:
data_B = f.read().splitlines()
data_A = list(map(float, data_A))
data_B = list(map(float, data_B))
buildOrigCDFs(data_A, data_B)
dp = 0.005
N = 1000
M = 1000
B = 1000
eps_FnGm = epsilon(dp)
lamda = ((0.0 + N) / (N + M))
const = np.sqrt((((1.0 * N) * M) / ((N + M) + 0.0)))
samples = []
for b in range(B):
Fb = []
Gb = []
Fvalues = []
Gvalues = []
uniF = np.random.uniform(0, 1, N)
uniG = np.random.uniform(0, 1, M)
for i in range(0, N):
Fvalues.append(invF(uniF[i]))
for j in range(0, M):
Gvalues.append(invG(uniG[j]))
buildNewCDFs(Fvalues, Gvalues)
distance = epsilonNew(dp, N, M)
samples.append(distance)
sigma = np.std(samples)
min_epsilon = min(max((eps_FnGm - (((1 / const) * sigma) * normal.ppf(alpha))), 0.0), 1.0)
print('The minimal epsilon for which Algorithm A is almost stochastically greater than algorithm B is ', min_epsilon)
if ((min_epsilon <= 0.5) and (min_epsilon > 0.0)):
print('since epsilon <= 0.5 we will claim that A is better than B with significance level alpha=', alpha)
elif (min_epsilon == 0.0):
print('since epsilon = 0, algorithm A is stochastically dominant over B')
else:
print('since epsilon > 0.5 we will claim that A is not better than B with significance level alpha=', alpha)
|
class InputFeatures(object):
'A single set of features of data.'
def __init__(self, input_ids, head_span, tail_span, token_masks):
self.input_ids = input_ids
self.head_span = head_span
self.tail_span = tail_span
self.token_masks = token_masks
|
class Instance(object):
def __init__(self, words, relation, head, tail, headpos, tailpos, headtype, tailtype, ner=None, is_noise=None):
self.words = words
self.relation = relation
self.head = head
self.tail = tail
self.headpos = headpos
self.tailpos = tailpos
self.headtype = headtype
self.tailtype = tailtype
self.d_rel = ''
self.ner = ner
self.is_noise = is_noise
|
class Data():
def __init__(self, args, mode='train'):
if (mode == 'train'):
data_file = args.train_data_file
elif (mode == 'test'):
data_file = args.test_data_file
elif (mode == 'dev'):
data_file = args.dev_data_file
elif (mode == 'test_noise'):
data_file = args.test_noise_file
self.dataset = 'nyt'
rel2id_file = args.rel2id_file
self.max_len = args.max_len
self.tokenizer = WordTokenizer(args.vocab_file)
self.use_noise_label = args.noise_label
self.create_label_dict()
self.facts = defaultdict(set)
print('Data Loading!-----')
if (self.use_noise_label and (mode == 'test_noise')):
data = self.load_data_nyt_arnor_ner_noise(data_file, rel2id_file)
else:
data = self.load_data_nyt_arnor_ner(data_file, rel2id_file)
ori_data_len = len(data)
print('Data Loaded!-----')
print('Data Preprocessing!-----')
features = self.preprocess(data)
print('Data Preprocessed!-----')
print('Processed Data List Creating!----')
self.processed_data = []
delete_index = []
self.rel_num = defaultdict(int)
for (_, (item, feature)) in enumerate(zip(data, features)):
if (feature is None):
delete_index.append(_)
continue
temp_item = {}
temp_item['input_ids'] = feature.input_ids
temp_item['e1_begin'] = feature.head_span[0]
temp_item['e1_end'] = feature.head_span[1]
temp_item['e2_begin'] = feature.tail_span[0]
temp_item['e2_end'] = feature.tail_span[1]
if (not item.relation):
delete_index.append(_)
continue
temp_item['rel'] = item.relation
temp_item['ori_sentence'] = item.words
temp_item['token_masks'] = feature.token_masks
temp_item['bag_name'] = (item.head, item.tail, item.relation)
temp_item['ner'] = item.ner
if self.use_noise_label:
temp_item['is_noise'] = item.is_noise
self.rel_num[item.relation] += 1
self.processed_data.append(temp_item)
print('Processed Data List Created!----')
print('Processed data has {} instances'.format(len(self.processed_data)))
for (rel, num) in self.rel_num.items():
print('{}: {}'.format(rel, num))
def load_predenoise_labels(self, path, describe=''):
with open(((path + describe) + '_labels.txt'), 'r') as f:
labels = json.load(f)
return labels
def load_data_nyt_arnor_ner(self, data_file, rel2id_file, load_ner=True):
self.create_label_dict(rel2id_file)
if load_ner:
self.create_ner_dict()
with open(data_file, 'r') as infile:
data = json.load(infile)
instances = []
for item in data:
words = item['sentence'].split(' ')
if (len(words) > self.max_len):
continue
relation = item['relation']
if (relation == 'None'):
relation = 'NA'
head = item['head']['word']
tail = item['tail']['word']
if (relation != 'NA'):
self.facts[(head, tail)].add(relation)
try:
head_list = head.split()
pos = (- 1)
while True:
pos = words.index(head_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(head_list))]) == head):
head_pos = (pos, ((pos + len(head_list)) - 1))
break
tail_list = tail.split()
pos = (- 1)
while True:
pos = words.index(tail_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(tail_list))]) == tail):
tail_pos = (pos, ((pos + len(tail_list)) - 1))
break
except:
continue
head_type = item['head']['type']
tail_type = item['tail']['type']
if load_ner:
ner = [self.ner2id[i] for i in item['stanford_ner']]
else:
ner = None
instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, ner))
print('Original data has {} instances'.format(len(instances)))
print('***** print examples ******')
for ins in instances[:5]:
print('words: {}, head: {}, head_pos: {}, tail: {}, tail_pos: {}, relation: {}, d_rel: {}, ner: {}'.format(' '.join(ins.words), ins.head, str(ins.headpos), ins.tail, str(ins.tailpos), ins.relation, ins.d_rel, ins.ner))
return instances
def load_data_nyt_arnor_ner_noise(self, data_file, rel2id_file, load_ner=True):
self.create_label_dict(rel2id_file)
if load_ner:
self.create_ner_dict()
with open(data_file, 'r') as infile:
data = json.load(infile)
instances = []
for item in data:
words = item['sentence'].split(' ')
if (len(words) > self.max_len):
continue
relation = item['relation']
if (relation == 'None'):
relation = 'NA'
head = item['head']['word']
tail = item['tail']['word']
if (relation != 'NA'):
self.facts[(head, tail)].add(relation)
try:
head_list = head.split()
pos = (- 1)
while True:
pos = words.index(head_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(head_list))]) == head):
head_pos = (pos, ((pos + len(head_list)) - 1))
break
tail_list = tail.split()
pos = (- 1)
while True:
pos = words.index(tail_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(tail_list))]) == tail):
tail_pos = (pos, ((pos + len(tail_list)) - 1))
break
except:
continue
head_type = item['head']['type']
tail_type = item['tail']['type']
if load_ner:
ner = [self.ner2id[i] for i in item['stanford_ner']]
else:
ner = None
is_noise = item['is_noise']
instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, ner, is_noise))
print('Original data has {} instances'.format(len(instances)))
print('***** print examples ******')
for ins in instances[:5]:
print('words: {}, head: {}, head_pos: {}, tail: {}, tail_pos: {}, relation: {}, d_rel: {}, ner: {}'.format(' '.join(ins.words), ins.head, str(ins.headpos), ins.tail, str(ins.tailpos), ins.relation, ins.d_rel, ins.ner))
return instances
def create_ner_dict(self):
file = 'data/ner2id.json'
with open(file, 'r') as f:
self.ner2id = json.load(f)
def get_label_num(self):
return len(self.relId2labelId)
def preprocess(self, data, token_mask_id=0):
features = []
unk = 0
for (idx, item) in enumerate(data):
tokens = item.words
if (len(tokens) > self.max_len):
features.append(None)
continue
(input_ids, unk_num) = self.tokenizer.convert_tokens_to_ids(tokens, self.max_len, self.tokenizer.vocab['[PAD]'], self.tokenizer.vocab['[UNK]'], uncased=True)
head_span = [item.headpos[0], (item.headpos[(- 1)] + 1)]
tail_span = [item.tailpos[0], (item.tailpos[(- 1)] + 1)]
token_masks = ([1] * len(input_ids))
if (idx < 5):
print('*** Example ***')
print('tokens: {}'.format(' '.join(tokens)))
print('E1 position:({}, {}), E2 position:({}, {})'.format(head_span[0], head_span[1], tail_span[0], tail_span[1]))
print('token mask: {}'.format(str(token_masks)))
print('input ids: {}'.format(str(input_ids)))
features.append(InputFeatures(input_ids=input_ids, head_span=head_span, tail_span=tail_span, token_masks=token_masks))
unk += unk_num
print('Convert token to vocab id, unk token num: {}'.format(unk))
return features
def get_vocab_size(self):
return len(self.tokenizer.vocab)
def create_label_dict(self, file=None):
if (file is None):
self.relId2labelId = LABEL_TO_ID
self.labelId2rel = {v: k for (k, v) in self.relId2labelId.items()}
else:
with open(file, 'r') as f:
line = json.load(f)
self.relId2labelId = line
self.labelId2rel = {v: k for (k, v) in self.relId2labelId.items()}
def get_labels(self):
return self.labels
def id2rel(self, id):
return self.labelId2rel.get(id, None)
def rel2id(self, rel):
return self.relId2labelId.get(rel, None)
def get_label_num(self):
return len(self.relId2labelId)
def posnum_to_posarray(self, posbegin, posend):
if (posend < posbegin):
posend = posbegin
array1 = (np.arange(0, posbegin) - posbegin)
array2 = np.zeros((posend - posbegin), dtype=np.int32)
array3 = (np.arange(posend, self.max_len) - posend)
posarray = (np.append(np.append(array1, array2), array3) + self.max_len)
return posarray
def batchify(self, noise_label=False):
batch_data = []
PAD = self.tokenizer.vocab['[PAD]']
ner_PAD = self.ner2id['[PAD]']
for (i, item) in enumerate(self.processed_data):
padding_size = (self.max_len - len(item['input_ids']))
ori_token_masks = torch.LongTensor((item['token_masks'] + ([0] * padding_size)))
head_masks = torch.zeros((len(item['input_ids']) + padding_size)).long()
head_masks[item['e1_begin']:item['e1_end']] = 1
head_masks = (head_masks * ori_token_masks)
tail_masks = torch.zeros((len(item['input_ids']) + padding_size)).long()
tail_masks[item['e2_begin']:item['e2_end']] = 1
tail_masks = (tail_masks * ori_token_masks)
head_pos = torch.LongTensor(self.posnum_to_posarray(item['e1_begin'], (item['e1_end'] - 1)))
tail_pos = torch.LongTensor(self.posnum_to_posarray(item['e2_begin'], (item['e2_end'] - 1)))
try:
assert (head_pos.size(0) == self.max_len)
except:
print(item['e1_begin'], item['e1_end'])
input_ids = torch.LongTensor((item['input_ids'] + ([PAD] * padding_size)))
input_masks = torch.LongTensor((([1] * len(item['input_ids'])) + ([0] * padding_size)))
labels = torch.LongTensor([self.relId2labelId[item['rel']]])
ner_labels = torch.LongTensor((item['ner'] + ([ner_PAD] * padding_size)))
batch_data.append([head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels])
if noise_label:
is_noise = item['is_noise']
batch_data[(- 1)].append(is_noise)
return batch_data
def dumpData(self, save_path):
with open(save_path, 'wb') as fp:
pickle.dump(self, fp)
|
class InputFeatures(object):
'A single set of features of data.'
def __init__(self, input_ids, head_span, tail_span, token_masks):
self.input_ids = input_ids
self.head_span = head_span
self.tail_span = tail_span
self.token_masks = token_masks
|
class Instance(object):
def __init__(self, words, relation, head, tail, headpos, tailpos, headtype, tailtype, d_rel='', ner=None, is_noise=None):
self.words = words
self.relation = relation
self.head = head
self.tail = tail
self.headpos = headpos
self.tailpos = tailpos
self.headtype = headtype
self.tailtype = tailtype
self.d_rel = d_rel
self.ner = ner
self.is_noise = is_noise
|
class Data():
def __init__(self, args, mode='train'):
if (mode == 'train'):
data_file = args.train_data_file
elif (mode == 'test'):
data_file = args.test_data_file
elif (mode == 'dev'):
data_file = args.dev_data_file
elif (mode == 'test_noise'):
data_file = args.test_noise_file
self.dataset = 'tacred'
rel2id_file = args.rel2id_file
self.max_len = args.max_len
self.tokenizer = WordTokenizer(args.vocab_file)
self.use_noise_label = args.noise_label
self.create_label_dict()
self.facts = defaultdict(set)
print('Data Loading!-----')
if (self.use_noise_label and (mode == 'test_noise')):
data = self.load_data_nyt_arnor_ner_noise(data_file, rel2id_file)
elif (self.dataset == 'nyt'):
data = self.load_data_nyt_arnor_ner(data_file, rel2id_file)
elif (self.dataset == 'tacred'):
data = self.load_data_tacred_arnor_ner(data_file, rel2id_file)
ori_data_len = len(data)
print('Data Loaded!-----')
print('Data Preprocessing!-----')
features = self.preprocess(data)
print('Data Preprocessed!-----')
print('Processed Data List Creating!----')
self.processed_data = []
delete_index = []
self.rel_num = defaultdict(int)
for (_, (item, feature)) in enumerate(zip(data, features)):
if (feature is None):
delete_index.append(_)
continue
temp_item = {}
temp_item['input_ids'] = feature.input_ids
temp_item['e1_begin'] = feature.head_span[0]
temp_item['e1_end'] = feature.head_span[1]
temp_item['e2_begin'] = feature.tail_span[0]
temp_item['e2_end'] = feature.tail_span[1]
if (not item.relation):
delete_index.append(_)
continue
temp_item['rel'] = item.relation
temp_item['D_rel'] = item.d_rel
temp_item['ori_sentence'] = item.words
temp_item['token_masks'] = feature.token_masks
temp_item['bag_name'] = (item.head, item.tail, item.relation)
temp_item['ner'] = item.ner
if self.use_noise_label:
temp_item['is_noise'] = item.is_noise
self.rel_num[item.relation] += 1
self.processed_data.append(temp_item)
print('Processed Data List Created!----')
print('Processed data has {} instances'.format(len(self.processed_data)))
for (rel, num) in self.rel_num.items():
print('{}: {}'.format(rel, num))
def load_predenoise_labels(self, path, describe=''):
with open(((path + describe) + '_labels.txt'), 'r') as f:
labels = json.load(f)
return labels
def load_data_nyt_arnor_ner(self, data_file, rel2id_file, load_ner=True):
self.create_label_dict(rel2id_file)
if load_ner:
self.create_ner_dict()
with open(data_file, 'r') as infile:
data = json.load(infile)
instances = []
for item in data:
words = item['sentence'].split(' ')
if (len(words) > self.max_len):
continue
relation = item['relation']
if (relation == 'None'):
relation = 'NA'
head = item['head']['word']
tail = item['tail']['word']
if (relation != 'NA'):
self.facts[(head, tail)].add(relation)
try:
head_list = head.split()
pos = (- 1)
while True:
pos = words.index(head_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(head_list))]) == head):
head_pos = (pos, ((pos + len(head_list)) - 1))
break
tail_list = tail.split()
pos = (- 1)
while True:
pos = words.index(tail_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(tail_list))]) == tail):
tail_pos = (pos, ((pos + len(tail_list)) - 1))
break
except:
continue
head_type = item['head']['type']
tail_type = item['tail']['type']
if load_ner:
ner = [self.ner2id[i] for i in item['stanford_ner']]
else:
ner = None
instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, ner=ner))
print('Original data has {} instances'.format(len(instances)))
print('***** print examples ******')
for ins in instances[:5]:
print('words: {}, head: {}, head_pos: {}, tail: {}, tail_pos: {}, relation: {}, d_rel: {}, ner: {}'.format(' '.join(ins.words), ins.head, str(ins.headpos), ins.tail, str(ins.tailpos), ins.relation, ins.d_rel, ins.ner))
return instances
def load_data_tacred_arnor_ner(self, data_file, rel2id_file, load_ner=True):
if load_ner:
self.create_ner_dict(file='data/tacred_ner2id.json')
with open(data_file, 'r') as infile:
data = json.load(infile)
instances = []
for item in data:
words = item['sentence'].split(' ')
if (len(words) > self.max_len):
continue
relation = item['relation']
d_rel = item['D_relation']
head = item['head']['word']
tail = item['tail']['word']
self.facts[(head, tail)].add(relation)
head_pos = item['head']['pos']
tail_pos = item['tail']['pos']
head_type = item['head']['type']
tail_type = item['tail']['type']
(ss, se) = head_pos
# tail span indices (named ts/te to avoid shadowing the os module)
(ts, te) = tail_pos
words[ss:(se + 1)] = ([('SUBJ-' + head_type)] * ((se - ss) + 1))
words[ts:(te + 1)] = ([('OBJ-' + tail_type)] * ((te - ts) + 1))
if load_ner:
ner = [self.ner2id[i] for i in item['stanford_ner']]
else:
ner = None
instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, d_rel=d_rel, ner=ner))
print('Original data has {} instances'.format(len(instances)))
print('***** print examples ******')
for ins in instances[:5]:
print('words: {}, head: {}, head_pos: {}, tail: {}, tail_pos: {}, relation: {}, d_rel: {}, ner: {}'.format(' '.join(ins.words), ins.head, str(ins.headpos), ins.tail, str(ins.tailpos), ins.relation, ins.d_rel, ins.ner))
return instances
def load_data_nyt_arnor_ner_noise(self, data_file, rel2id_file, load_ner=True):
self.create_label_dict(rel2id_file)
if load_ner:
self.create_ner_dict()
with open(data_file, 'r') as infile:
data = json.load(infile)
instances = []
for item in data:
words = item['sentence'].split(' ')
if (len(words) > self.max_len):
continue
relation = item['relation']
if (relation == 'None'):
relation = 'NA'
head = item['head']['word']
tail = item['tail']['word']
if (relation != 'NA'):
self.facts[(head, tail)].add(relation)
try:
head_list = head.split()
pos = (- 1)
while True:
pos = words.index(head_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(head_list))]) == head):
head_pos = (pos, ((pos + len(head_list)) - 1))
break
tail_list = tail.split()
pos = (- 1)
while True:
pos = words.index(tail_list[0], (pos + 1))
if (' '.join(words[pos:(pos + len(tail_list))]) == tail):
tail_pos = (pos, ((pos + len(tail_list)) - 1))
break
except:
continue
head_type = item['head']['type']
tail_type = item['tail']['type']
if load_ner:
ner = [self.ner2id[i] for i in item['stanford_ner']]
else:
ner = None
is_noise = item['is_noise']
instances.append(Instance(words, relation, head, tail, head_pos, tail_pos, head_type, tail_type, ner=ner, is_noise=is_noise))
print('Original data has {} instances'.format(len(instances)))
print('***** print examples ******')
for ins in instances[:5]:
print('words: {}, head: {}, head_pos: {}, tail: {}, tail_pos: {}, relation: {}, d_rel: {}, ner: {}'.format(' '.join(ins.words), ins.head, str(ins.headpos), ins.tail, str(ins.tailpos), ins.relation, ins.d_rel, ins.ner))
return instances
def create_ner_dict(self, file=None):
if (file is None):
file = 'data/ner2id.json'
with open(file, 'r') as f:
self.ner2id = json.load(f)
def get_label_num(self):
return len(self.relId2labelId)
def preprocess(self, data, token_mask_id=0):
features = []
unk = 0
for (idx, item) in enumerate(data):
tokens = item.words
if (len(tokens) > self.max_len):
features.append(None)
continue
(input_ids, unk_num) = self.tokenizer.convert_tokens_to_ids(tokens, self.max_len, self.tokenizer.vocab['[PAD]'], self.tokenizer.vocab['[UNK]'], uncased=False)
head_span = [item.headpos[0], (item.headpos[(- 1)] + 1)]
tail_span = [item.tailpos[0], (item.tailpos[(- 1)] + 1)]
token_masks = ([1] * len(input_ids))
if (idx < 5):
print('*** Example ***')
print('tokens: {}'.format(' '.join(tokens)))
print('E1 position:({}, {}), E2 position:({}, {})'.format(head_span[0], head_span[1], tail_span[0], tail_span[1]))
print('token mask: {}'.format(str(token_masks)))
print('input ids: {}'.format(str(input_ids)))
features.append(InputFeatures(input_ids=input_ids, head_span=head_span, tail_span=tail_span, token_masks=token_masks))
unk += unk_num
print('Convert token to vocab id, unk token num: {}'.format(unk))
return features
def get_vocab_size(self):
return len(self.tokenizer.vocab)
def create_label_dict(self, file=None):
if (file is None):
self.relId2labelId = LABEL_TO_ID
self.labelId2rel = {v: k for (k, v) in self.relId2labelId.items()}
else:
with open(file, 'r') as f:
line = json.load(f)
self.relId2labelId = line
self.labelId2rel = {v: k for (k, v) in self.relId2labelId.items()}
def get_labels(self):
return self.labels
def id2rel(self, id):
return self.labelId2rel.get(id, None)
def rel2id(self, rel):
return self.relId2labelId.get(rel, None)
def get_label_num(self):
return len(self.relId2labelId)
def posnum_to_posarray(self, posbegin, posend):
if (posend < posbegin):
posend = posbegin
array1 = (np.arange(0, posbegin) - posbegin)
array2 = np.zeros((posend - posbegin), dtype=np.int32)
array3 = (np.arange(posend, self.max_len) - posend)
posarray = (np.append(np.append(array1, array2), array3) + self.max_len)
return posarray
def batchify(self, noise_label=False):
batch_data = []
PAD = self.tokenizer.vocab['[PAD]']
ner_PAD = self.ner2id['[PAD]']
for (i, item) in enumerate(self.processed_data):
padding_size = (self.max_len - len(item['input_ids']))
ori_token_masks = torch.LongTensor((item['token_masks'] + ([0] * padding_size)))
head_masks = torch.zeros((len(item['input_ids']) + padding_size)).long()
head_masks[item['e1_begin']:item['e1_end']] = 1
head_masks = (head_masks * ori_token_masks)
tail_masks = torch.zeros((len(item['input_ids']) + padding_size)).long()
tail_masks[item['e2_begin']:item['e2_end']] = 1
tail_masks = (tail_masks * ori_token_masks)
head_pos = torch.LongTensor(self.posnum_to_posarray(item['e1_begin'], (item['e1_end'] - 1)))
tail_pos = torch.LongTensor(self.posnum_to_posarray(item['e2_begin'], (item['e2_end'] - 1)))
if (head_pos.size(0) != self.max_len):
# log the offending entity span instead of silently swallowing an assertion error
print('position array length mismatch:', item['e1_begin'], item['e1_end'])
input_ids = torch.LongTensor((item['input_ids'] + ([PAD] * padding_size)))
input_masks = torch.LongTensor((([1] * len(item['input_ids'])) + ([0] * padding_size)))
labels = torch.LongTensor([self.relId2labelId[item['rel']]])
if (self.dataset == 'tacred'):
D_labels = torch.LongTensor([self.relId2labelId[item['D_rel']]])
ner_labels = torch.LongTensor((item['ner'] + ([ner_PAD] * padding_size)))
if (self.dataset == 'tacred'):
batch_data.append([head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels, D_labels])
else:
batch_data.append([head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels])
if noise_label:
is_noise = item['is_noise']
batch_data[(- 1)].append(is_noise)
return batch_data
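# Consumption sketch (an assumption about the training loop, which is not shown here):
# every element of batch_data is a fixed-order list of per-example tensors, so the
# default collate of a DataLoader stacks them into batched tensors, e.g.
#
#   loader = torch.utils.data.DataLoader(data.batchify(), batch_size=32, shuffle=True)
#   for head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels in loader:
#       ...
#
# On the tacred branch each element additionally carries D_labels, and with
# noise_label=True an is_noise flag is appended at the end.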
def dumpData(self, save_path):
with open(save_path, 'wb') as f:
pickle.dump(self, f)
class WordTokenizer(object):
'Runs simple whitespace word tokenization over a fixed vocabulary.'
def __init__(self, vocab=None, unk_token='[UNK]', pad_token='[PAD]'):
self.vocab = load_vocab(vocab)
self.inv_vocab = {v: k for (k, v) in self.vocab.items()}
self.unk_token = unk_token
self.pad_token = pad_token
if (not (pad_token in self.vocab)):
self.vocab[pad_token] = len(self.vocab)
def tokenize(self, text):
' Tokenizes a piece of text into whitespace-separated word tokens.\n The text is converted to unicode, cleaned of control characters, CJK characters are surrounded by spaces, and the result is split on whitespace.\n\n For example:\n input = "Barack Obama was born in Hawaii"\n output = ["Barack", "Obama", "was", "born", "in", "Hawaii"]\n\n Args:\n text: A single token or whitespace separated tokens.\n Returns:\n token_list: A list of whitespace-separated word tokens.\n '
text = convert_to_unicode(text)
text = clean_text(text)
text = tokenize_chinese_chars(text)
token_list = split_on_whitespace(text)
return token_list
def convert_tokens_to_ids(self, tokens, max_seq_length=None, blank_id=0, unk_id=1, uncased=True):
return convert_by_vocab(self.vocab, tokens, max_seq_length, blank_id, unk_id, uncased=uncased)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab(self.inv_vocab, ids)
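# Usage sketch (the vocabulary contents and file name below are illustrative only):
#
#   # vocab.json: {"[PAD]": 0, "[UNK]": 1, "barack": 2, "obama": 3, "was": 4, "born": 5, "in": 6, "hawaii": 7}
#   tokenizer = WordTokenizer(vocab='vocab.json')
#   tokens = tokenizer.tokenize('Barack Obama was born in Hawaii')
#   # -> ['Barack', 'Obama', 'was', 'born', 'in', 'Hawaii']
#   ids, unk_num = tokenizer.convert_tokens_to_ids(tokens, 16, tokenizer.vocab['[PAD]'], tokenizer.vocab['[UNK]'])
#   # with the default uncased=True this yields ids = [2, 3, 4, 5, 6, 7] and unk_num = 0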
def is_whitespace(char):
' Checks whether `char` is a whitespace character.\n \t, \n, and \r are technically control characters but we treat them\n as whitespace since they are generally considered as such.\n '
if ((char == ' ') or (char == '\t') or (char == '\n') or (char == '\r')):
return True
cat = unicodedata.category(char)
if (cat == 'Zs'):
return True
return False
def is_control(char):
' Checks whether `char` is a control character.\n \t, \n, and \r are technically control characters, but we treat them as whitespace here, so they return False.\n '
if ((char == '\t') or (char == '\n') or (char == '\r')):
return False
cat = unicodedata.category(char)
if cat.startswith('C'):
return True
return False
def is_punctuation(char):
' Checks whether `char` is a punctuation character.\n We treat all non-letter/number ASCII as punctuation. Characters such as "^", "$", and "`" are not in the Unicode\n Punctuation class, but we treat them as punctuation anyway, for consistency.\n '
cp = ord(char)
if (((cp >= 33) and (cp <= 47)) or ((cp >= 58) and (cp <= 64)) or ((cp >= 91) and (cp <= 96)) or ((cp >= 123) and (cp <= 126))):
return True
cat = unicodedata.category(char)
if cat.startswith('P'):
return True
return False
def is_chinese_char(cp):
' Checks whether CP is the codepoint of a CJK character.\n This defines a "chinese character" as anything in the CJK Unicode block:\n https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)\n Note that the CJK Unicode block is NOT all Japanese and Korean characters,\n despite its name. The modern Korean Hangul alphabet is a different block,\n as is Japanese Hiragana and Katakana. Those alphabets are used to write\n space-separated words, so they are not treated specially and are handled\n like all of the other languages.\n '
if (((cp >= 19968) and (cp <= 40959)) or ((cp >= 13312) and (cp <= 19903)) or ((cp >= 131072) and (cp <= 173791)) or ((cp >= 173824) and (cp <= 177983)) or ((cp >= 177984) and (cp <= 178207)) or ((cp >= 178208) and (cp <= 183983)) or ((cp >= 63744) and (cp <= 64255)) or ((cp >= 194560) and (cp <= 195103))):
return True
return False
def convert_to_unicode(text):
"Converts `text` to Unicode (if it's not already), assuming utf-8 input."
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode('utf-8', 'ignore')
elif isinstance(text, unicode):
return text
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
else:
raise ValueError('Not running on Python2 or Python 3?')
def clean_text(text):
output = []
for char in text:
cp = ord(char)
if ((cp == 0) or (cp == 65533) or is_control(char)):
continue
if is_whitespace(char):
output.append(' ')
else:
output.append(char)
return ''.join(output)
def split_on_whitespace(text):
" Runs basic whitespace cleaning and splitting on a peice of text.\n e.g, 'a b c' -> ['a', 'b', 'c']\n "
text = text.strip()
if (not text):
return []
return text.split()
def split_on_punctuation(text):
'Splits punctuation on a piece of text.'
start_new_word = True
output = []
for char in text:
if is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[(- 1)].append(char)
return [''.join(x) for x in output]
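# Quick illustration (not part of the original code): punctuation characters become their
# own tokens, while whitespace stays attached to the following word, because only
# punctuation starts a new token here.
assert (split_on_punctuation('hello, world!') == ['hello', ',', ' world', '!'])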
def tokenize_chinese_chars(text):
'Adds whitespace around any CJK character.'
output = []
for char in text:
cp = ord(char)
if is_chinese_char(cp):
output.append(' ')
output.append(char)
output.append(' ')
else:
output.append(char)
return ''.join(output)
def strip_accents(text):
'Strips accents from a piece of text.'
text = unicodedata.normalize('NFD', text)
output = []
for char in text:
cat = unicodedata.category(char)
if (cat == 'Mn'):
continue
output.append(char)
return ''.join(output)
def load_vocab(vocab_file):
with open(vocab_file, 'r') as f:
vocab = json.load(f)
return vocab
def printable_text(text):
" Returns text encoded in a way suitable for print or `tf.logging`.\n These functions want `str` for both Python2 and Python3, but in one case\n it's a Unicode string and in the other it's a byte string.\n "
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode('utf-8', 'ignore')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
elif six.PY2:
if isinstance(text, str):
return text
elif isinstance(text, unicode):
return text.encode('utf-8')
else:
raise ValueError(('Unsupported string type: %s' % type(text)))
else:
raise ValueError('Not running on Python2 or Python 3?')
def convert_by_vocab(vocab, items, max_seq_length=None, blank_id=0, unk_id=1, uncased=True):
'Converts a sequence of [tokens|ids] using the vocab; returns (converted_items, unk_count). max_seq_length and blank_id are accepted for API compatibility but unused here.'
output = []
unk_num = 0
for item in items:
if (uncased and isinstance(item, str)):
# only lower-case string tokens; ids passed via convert_ids_to_tokens are ints
item = item.lower()
if (item in vocab):
output.append(vocab[item])
else:
output.append(unk_id)
unk_num += 1
return (output, unk_num)
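# Illustration with a toy vocabulary (made up for this example): out-of-vocabulary tokens
# map to unk_id and are counted; max_seq_length and blank_id are accepted but unused.
_toy_vocab = {'[PAD]': 0, '[UNK]': 1, 'the': 2, 'cat': 3}
(_ids, _unk) = convert_by_vocab(_toy_vocab, ['The', 'cat', 'meowed'])
assert ((_ids == [2, 3, 1]) and (_unk == 1))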
def convert_tokens_to_ids(vocab, tokens, max_seq_length=None, blank_id=0, unk_id=1):
return convert_by_vocab(vocab, tokens, max_seq_length, blank_id, unk_id)
def convert_ids_to_tokens(inv_vocab, ids):
return convert_by_vocab(inv_vocab, ids)
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
'Truncates a pair of sequences to a maximum sequence length.'
while True:
total_length = (len(tokens_a) + len(tokens_b))
if (total_length <= max_num_tokens):
break
trunc_tokens = (tokens_a if (len(tokens_a) > len(tokens_b)) else tokens_b)
assert (len(trunc_tokens) >= 1)
if (rng.random() < 0.5):
del trunc_tokens[0]
else:
trunc_tokens.pop()
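# Usage sketch (illustrative token lists): the longer sequence is trimmed in place,
# dropping tokens from the front or the back at random, until the pair fits max_num_tokens.
import random
_a = ['a{}'.format(i) for i in range(10)]
_b = ['b{}'.format(i) for i in range(4)]
truncate_seq_pair(_a, _b, max_num_tokens=8, rng=random.Random(0))
assert ((len(_a) + len(_b)) <= 8)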
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def create_float_feature(values):
feature = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return feature
def add_token(tokens_a, tokens_b=None):
assert (len(tokens_a) >= 1)
tokens = []
segment_ids = []
tokens.append('[CLS]')
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append('[SEP]')
segment_ids.append(0)
if (tokens_b != None):
assert (len(tokens_b) >= 1)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append('[SEP]')
segment_ids.append(1)
return (tokens, segment_ids)
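# Illustration (not in the original source): [CLS]/[SEP] insertion and segment ids for a
# sentence pair.
(_tokens, _segments) = add_token(['barack', 'obama'], ['was', 'born'])
assert (_tokens == ['[CLS]', 'barack', 'obama', '[SEP]', 'was', 'born', '[SEP]'])
assert (_segments == [0, 0, 0, 0, 1, 1, 1])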
def parse_arguments():
parser = argparse.ArgumentParser(description='Score a prediction file using the gold labels.')
parser.add_argument('gold_file', help='The gold relation file; one relation per line')
parser.add_argument('pred_file', help='A prediction file; one relation per line, in the same order as the gold file.')
args = parser.parse_args()
return args
def score(key, prediction, verbose=False, NO_RELATION='NA'):
correct_by_relation = Counter()
guessed_by_relation = Counter()
gold_by_relation = Counter()
for row in range(len(key)):
gold = key[row]
guess = prediction[row]
if ((gold == NO_RELATION) and (guess == NO_RELATION)):
pass
elif ((gold == NO_RELATION) and (guess != NO_RELATION)):
guessed_by_relation[guess] += 1
elif ((gold != NO_RELATION) and (guess == NO_RELATION)):
gold_by_relation[gold] += 1
elif ((gold != NO_RELATION) and (guess != NO_RELATION)):
guessed_by_relation[guess] += 1
gold_by_relation[gold] += 1
if (gold == guess):
correct_by_relation[guess] += 1
if verbose:
print('Per-relation statistics:')
relations = gold_by_relation.keys()
longest_relation = 0
for relation in sorted(relations):
longest_relation = max(len(relation), longest_relation)
for relation in sorted(relations):
correct = correct_by_relation[relation]
guessed = guessed_by_relation[relation]
gold = gold_by_relation[relation]
prec = 1.0
if (guessed > 0):
prec = (float(correct) / float(guessed))
recall = 0.0
if (gold > 0):
recall = (float(correct) / float(gold))
f1 = 0.0
if ((prec + recall) > 0):
f1 = (((2.0 * prec) * recall) / (prec + recall))
sys.stdout.write((('{:<' + str(longest_relation)) + '}').format(relation))
sys.stdout.write(' P: ')
if (prec < 0.1):
sys.stdout.write(' ')
if (prec < 1.0):
sys.stdout.write(' ')
sys.stdout.write('{:.2%}'.format(prec))
sys.stdout.write(' R: ')
if (recall < 0.1):
sys.stdout.write(' ')
if (recall < 1.0):
sys.stdout.write(' ')
sys.stdout.write('{:.2%}'.format(recall))
sys.stdout.write(' F1: ')
if (f1 < 0.1):
sys.stdout.write(' ')
if (f1 < 1.0):
sys.stdout.write(' ')
sys.stdout.write('{:.2%}'.format(f1))
sys.stdout.write((' #: %d' % gold))
sys.stdout.write('\n')
print('')
if verbose:
print('Final Score:')
prec_micro = 1.0
if (sum(guessed_by_relation.values()) > 0):
prec_micro = (float(sum(correct_by_relation.values())) / float(sum(guessed_by_relation.values())))
recall_micro = 0.0
if (sum(gold_by_relation.values()) > 0):
recall_micro = (float(sum(correct_by_relation.values())) / float(sum(gold_by_relation.values())))
f1_micro = 0.0
if ((prec_micro + recall_micro) > 0.0):
f1_micro = (((2.0 * prec_micro) * recall_micro) / (prec_micro + recall_micro))
return (prec_micro, recall_micro, f1_micro)
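# Usage sketch (toy labels, not from any dataset): micro-averaged precision/recall/F1
# over non-NA relations, in the TACRED scoring style implemented above.
_gold = ['per:title', 'NA', 'org:founded', 'per:title']
_pred = ['per:title', 'per:title', 'NA', 'per:title']
(_p, _r, _f1) = score(_gold, _pred, verbose=False)
# two correct non-NA predictions out of three guessed and three gold relations
assert (abs(_p - (2.0 / 3.0)) < 1e-09)
assert (abs(_r - (2.0 / 3.0)) < 1e-09)
assert (abs(_f1 - (2.0 / 3.0)) < 1e-09)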
def curve(y_scores, y_true, num=2000):
order = np.argsort(y_scores)[::(- 1)]
guess = 0.0
right = 0.0
target = np.sum(y_true)
precisions = []
recalls = []
for o in order[:num]:
guess += 1
if (y_true[o] == 1):
right += 1
precision = (right / guess)
recall = (right / target)
precisions.append(precision)
recalls.append(recall)
return (np.array(recalls), np.array(precisions))
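# Illustration with synthetic scores (made up for this example): precision/recall points
# over the predictions ranked by decreasing score, as consumed by the AUC code below.
import numpy as np
_scores = np.array([0.9, 0.8, 0.1, 0.7])
_labels = np.array([1, 0, 1, 1])
(_rec, _prec) = curve(_scores, _labels, num=4)
# ranking is 0.9, 0.8, 0.7, 0.1 -> precision [1, 1/2, 2/3, 3/4], recall [1/3, 1/3, 2/3, 1]
assert np.allclose(_prec, [1.0, 0.5, (2.0 / 3.0), 0.75])
assert np.allclose(_rec, [(1.0 / 3.0), (1.0 / 3.0), (2.0 / 3.0), 1.0])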
def AUC_and_PN(y_scores, y_true):
(recalls, precisions) = curve(y_scores, y_true, 3000)
recalls_01 = recalls[(recalls < 0.1)]
precisions_01 = precisions[(recalls < 0.1)]
AUC_01 = auc(recalls_01, precisions_01)
recalls_02 = recalls[(recalls < 0.2)]
precisions_02 = precisions[(recalls < 0.2)]
AUC_02 = auc(recalls_02, precisions_02)
recalls_03 = recalls[(recalls < 0.3)]
precisions_03 = precisions[(recalls < 0.3)]
AUC_03 = auc(recalls_03, precisions_03)
recalls_04 = recalls[(recalls < 0.4)]
precisions_04 = precisions[(recalls < 0.4)]
AUC_04 = auc(recalls_04, precisions_04)
AUC_all = average_precision_score(y_true, y_scores)
print(AUC_01, AUC_02, AUC_03, AUC_04, AUC_all)
# NOTE: the P@N evaluation below expects model, args, test1, test2, testall and eval()
# to be available in the surrounding (module) scope; they are not parameters of AUC_and_PN.
for (q, testdata) in enumerate([test1, test2, testall]):
(y_true, y_scores) = eval(model, testdata, args)
order = np.argsort((- y_scores))
top100 = order[:100]
correct_num_100 = 0.0
for i in top100:
if (y_true[i] == 1):
correct_num_100 += 1.0
print('P@100: ', (correct_num_100 / 100))
top200 = order[:200]
correct_num_200 = 0.0
for i in top200:
if (y_true[i] == 1):
correct_num_200 += 1.0
print('P@200: ', (correct_num_200 / 200))
top300 = order[:300]
correct_num_300 = 0.0
for i in top300:
if (y_true[i] == 1):
correct_num_300 += 1.0
print('P@300: ', (correct_num_300 / 300))
print('mean: ', ((((correct_num_100 / 100) + (correct_num_200 / 200)) + (correct_num_300 / 300)) / 3))
def bag_eval(pred_result, facts):
"\n Args:\n pred_result: a list with dict {'entpair': (head_id, tail_id), 'relation': rel, 'score': score}.\n Note that relation of NA should be excluded.\n Return:\n {'prec': narray[...], 'rec': narray[...], 'mean_prec': xx, 'f1': xx, 'auc': xx}\n prec (precision) and rec (recall) are in micro style.\n prec (precision) and rec (recall) are sorted in the decreasing order of the score.\n f1 is the max f1 score of those precison-recall points\n "
sorted_pred_result = sorted(pred_result, key=(lambda x: x['score']), reverse=True)
prec = []
rec = []
correct = 0
preds = []
filtered_facts = {}
total = sum([len(l) for l in facts.values()])
count = 0
for (i, item) in enumerate(sorted_pred_result):
if (len(facts[item['entpair']]) == 0):
continue
if (correct != total):
preds.append((item['entpair'], 'pred: {}, gold: {}'.format(item['relation'], str(facts[item['entpair']]))))
if (item['relation'] in facts[item['entpair']]):
correct += 1
count += 1
prec.append((float(correct) / float(count)))
rec.append((float(correct) / float(total)))
for c in [100, 200, 300]:
if (len(prec) >= c):
# prec[c - 1] is the precision over the top-c scored predictions
print('P@{}: {}'.format(c, prec[(c - 1)]))
auc_result = auc(x=rec, y=prec)
np_prec = np.array(prec)
np_rec = np.array(rec)
f1 = (((2 * np_prec) * np_rec) / ((np_prec + np_rec) + 1e-20)).max()
mean_prec = np_prec.mean()
with open('bagtest_result5.txt', 'w') as f:
json.dump(prec, f)
json.dump(rec, f)
f.write('\n')
for line in preds:
f.write((str(line) + '\n'))
return {'micro_p': np_prec, 'micro_r': np_rec, 'micro_p_mean': mean_prec, 'micro_f1': f1, 'auc': auc_result}
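# Usage sketch (entity pairs, relations and scores below are made up): facts maps an
# entity pair to its set of gold relations, pred_result holds one scored prediction per pair.
#
#   facts = defaultdict(set)
#   facts[('Obama', 'Hawaii')].add('per:birthplace')
#   facts[('Google', 'Mountain View')].add('org:headquarters')
#   preds = [{'entpair': ('Obama', 'Hawaii'), 'relation': 'per:birthplace', 'score': 0.9},
#            {'entpair': ('Google', 'Mountain View'), 'relation': 'org:headquarters', 'score': 0.8}]
#   metrics = bag_eval(preds, facts)
#   # metrics['micro_p'] / metrics['micro_r'] are the precision/recall curves;
#   # 'micro_f1' and 'auc' summarize them. Note bag_eval also writes bagtest_result5.txt.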
class SENT_Model(nn.Module):
def __init__(self, options, vocab_file=None):
super(SENT_Model, self).__init__()
self.max_sent_len = options.max_len
self.pos_emb_dim = 50
self.ner_label_size = options.ner_label_size
self.ner_emb_dim = 50
self.vocab_size = options.vocab_size
self.neg_sample_num = 10
self.device = options.gpu
self.n_device = options.n_gpu
self.batch_size = options.batch_size
self.label_size = options.label_size
weight_matrix = torch.from_numpy(np.load(vocab_file))
self.emb_dim = weight_matrix.size(1)
if options.random:
self.word_embs = nn.Embedding(self.vocab_size, self.emb_dim)
else:
if (self.vocab_size == (weight_matrix.size(0) + 2)):
unk = (torch.randn(1, self.emb_dim) / math.sqrt(self.emb_dim))
blk = torch.zeros(1, self.emb_dim)
weight_matrix = torch.cat([weight_matrix, unk, blk], 0)
self.word_embs = nn.Embedding(self.vocab_size, self.emb_dim, _weight=weight_matrix)
self.pos1_emb = nn.Embedding((self.max_sent_len * 2), self.pos_emb_dim)
self.pos2_emb = nn.Embedding((self.max_sent_len * 2), self.pos_emb_dim)
self.ner_emb = nn.Embedding(self.ner_label_size, self.ner_emb_dim)
self.input_dim = ((self.emb_dim + (2 * self.pos_emb_dim)) + self.ner_emb_dim)
self.encoder = nn.LSTM(input_size=self.input_dim, batch_first=True, hidden_size=256, bidirectional=True)
self.decoder = nn.Sequential(nn.Linear(((self.encoder.hidden_size * 2) * 2), (self.encoder.hidden_size * 2)), nn.Tanh(), nn.Linear((self.encoder.hidden_size * 2), self.label_size))
self.drop = nn.Dropout(0.5)
self.loss_function_pos = nn.CrossEntropyLoss()
self.loss_function_neg = nn.NLLLoss()
def forward(self, train_batch, mode='train', negloss=True):
(head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels) = train_batch
labels = labels.view((- 1))
(batch_size, seq_len) = (input_ids.size(0), input_ids.size(1))
words = self.word_embs(input_ids)
pos1 = self.pos1_emb(head_pos)
pos2 = self.pos2_emb(tail_pos)
ner = self.ner_emb(ner_labels)
inputs = torch.cat([words, ner, pos1, pos2], dim=(- 1))
inputs = self.drop(inputs)
input_lens = input_masks.sum((- 1))
inputs = nn.utils.rnn.pack_padded_sequence(inputs, input_lens, batch_first=True, enforce_sorted=False)
(hiddens, _) = self.encoder(inputs)
(hiddens, _) = nn.utils.rnn.pad_packed_sequence(hiddens, batch_first=True, total_length=seq_len)
hiddens = self.drop(hiddens)
(loss, preds, right, total, probs) = self.compute_negloss(hiddens, labels, ori_token_masks, head_masks, tail_masks, negloss)
label_probs = probs.gather((- 1), labels.view((- 1), 1))
pos_right = ((preds == labels) & (labels != 0)).sum()
return (loss, preds, right, total, pos_right, label_probs, probs)
def compute_negloss(self, t, labels, ori_token_masks, head_masks, tail_masks, negloss=True):
(batch_size, seq_len, hidden_dim) = t.size()
head_masks = head_masks.bool().unsqueeze((- 1)).repeat(1, 1, hidden_dim)
tail_masks = tail_masks.bool().unsqueeze((- 1)).repeat(1, 1, hidden_dim)
heads_t = ((t * head_masks).sum(dim=1, keepdim=True) / head_masks.sum(dim=1, keepdim=True))
tails_t = ((t * tail_masks).sum(dim=1, keepdim=True) / tail_masks.sum(dim=1, keepdim=True))
pos_sample = torch.cat([heads_t, tails_t], dim=(- 1))
logits = self.decoder(pos_sample.view(batch_size, (- 1)))
loss = 0.0
if negloss:
sample_num = self.neg_sample_num
neg_probs = torch.log(((1.0 - torch.softmax(logits, dim=(- 1))) + NEAR_0))
labels_ = labels.view((- 1), 1).repeat(1, sample_num).view((batch_size * sample_num))
neg_probs = neg_probs.unsqueeze(1).repeat(1, sample_num, 1).view((batch_size * sample_num), neg_probs.size((- 1)))
neg_label = ((labels_ + torch.LongTensor(labels_.size()).cuda().random_(1, self.label_size)) % self.label_size)
loss = self.loss_function_neg(neg_probs.view((- 1), self.label_size), neg_label.view((- 1)))
else:
loss = self.loss_function_pos(logits.view((- 1), logits.size((- 1))), labels.view((- 1)))
preds = torch.argmax(logits, dim=(- 1))
right = (torch.argmax(logits, dim=(- 1)) == labels).sum()
total = (labels >= 0).sum()
probs = torch.softmax(logits, dim=(- 1))
return (loss, preds, right, total, probs)
# Second definition of SENT_Model (TACRED variant): forward() additionally consumes the
# distant-supervision labels D_labels, and neg_sample_num is 50 instead of 10.
class SENT_Model(nn.Module):
def __init__(self, options, vocab_file=None):
super(SENT_Model, self).__init__()
self.max_sent_len = options.max_len
self.pos_emb_dim = 50
self.ner_label_size = options.ner_label_size
self.ner_emb_dim = 50
self.vocab_size = options.vocab_size
self.neg_sample_num = 50
self.device = options.gpu
self.n_device = options.n_gpu
self.batch_size = options.batch_size
self.label_size = options.label_size
weight_matrix = torch.from_numpy(np.load(vocab_file))
self.emb_dim = weight_matrix.size(1)
if options.random:
self.word_embs = nn.Embedding(self.vocab_size, self.emb_dim)
else:
if (self.vocab_size == (weight_matrix.size(0) + 2)):
unk = (torch.randn(1, self.emb_dim) / math.sqrt(self.emb_dim))
blk = torch.zeros(1, self.emb_dim)
weight_matrix = torch.cat([weight_matrix, unk, blk], 0)
self.word_embs = nn.Embedding(self.vocab_size, self.emb_dim, _weight=weight_matrix)
self.pos1_emb = nn.Embedding((self.max_sent_len * 2), self.pos_emb_dim)
self.pos2_emb = nn.Embedding((self.max_sent_len * 2), self.pos_emb_dim)
self.ner_emb = nn.Embedding(self.ner_label_size, self.ner_emb_dim)
self.input_dim = ((self.emb_dim + (2 * self.pos_emb_dim)) + self.ner_emb_dim)
self.encoder = nn.LSTM(input_size=self.input_dim, batch_first=True, hidden_size=256, bidirectional=True)
self.decoder = nn.Sequential(nn.Linear(((self.encoder.hidden_size * 2) * 2), (self.encoder.hidden_size * 2)), nn.Tanh(), nn.Linear((self.encoder.hidden_size * 2), self.label_size))
self.drop = nn.Dropout(0.5)
self.loss_function_pos = nn.CrossEntropyLoss()
self.loss_function_neg = nn.NLLLoss()
def forward(self, train_batch, mode='train', negloss=True):
(head_pos, tail_pos, input_ids, input_masks, ori_token_masks, head_masks, tail_masks, ner_labels, labels, D_labels) = train_batch
labels = labels.view((- 1))
D_labels = D_labels.view((- 1))
(batch_size, seq_len) = (input_ids.size(0), input_ids.size(1))
words = self.word_embs(input_ids)
pos1 = self.pos1_emb(head_pos)
pos2 = self.pos2_emb(tail_pos)
ner = self.ner_emb(ner_labels)
inputs = torch.cat([words, ner, pos1, pos2], dim=(- 1))
inputs = self.drop(inputs)
inputs = inputs.to(torch.float32)
input_lens = input_masks.sum((- 1))
inputs = nn.utils.rnn.pack_padded_sequence(inputs, input_lens, batch_first=True, enforce_sorted=False)
(hiddens, _) = self.encoder(inputs)
(hiddens, _) = nn.utils.rnn.pad_packed_sequence(hiddens, batch_first=True, total_length=seq_len)
hiddens = self.drop(hiddens)
(loss, preds, right, total, probs) = self.compute_negloss(hiddens, D_labels, ori_token_masks, head_masks, tail_masks, negloss)
label_probs = probs.gather((- 1), D_labels.view((- 1), 1))
pos_right = ((preds == labels) & (labels != 0)).sum()
return (loss, preds, right, total, pos_right, label_probs, probs)
def compute_negloss(self, t, labels, ori_token_masks, head_masks, tail_masks, negloss=True):
(batch_size, seq_len, hidden_dim) = t.size()
head_masks = head_masks.bool().unsqueeze((- 1)).repeat(1, 1, hidden_dim)
tail_masks = tail_masks.bool().unsqueeze((- 1)).repeat(1, 1, hidden_dim)
heads_t = ((t * head_masks).sum(dim=1, keepdim=True) / head_masks.sum(dim=1, keepdim=True))
tails_t = ((t * tail_masks).sum(dim=1, keepdim=True) / tail_masks.sum(dim=1, keepdim=True))
pos_sample = torch.cat([heads_t, tails_t], dim=(- 1))
logits = self.decoder(pos_sample.view(batch_size, (- 1)))
loss = 0.0
if negloss:
sample_num = self.neg_sample_num
neg_probs = torch.log(((1.0 - torch.softmax(logits, dim=(- 1))) + NEAR_0))
labels_ = labels.view((- 1), 1).repeat(1, sample_num).view((batch_size * sample_num))
neg_probs = neg_probs.unsqueeze(1).repeat(1, sample_num, 1).view((batch_size * sample_num), neg_probs.size((- 1)))
neg_label = ((labels_ + torch.LongTensor(labels_.size()).cuda().random_(1, self.label_size)) % self.label_size)
loss = self.loss_function_neg(neg_probs.view((- 1), self.label_size), neg_label.view((- 1)))
else:
loss = self.loss_function_pos(logits.view((- 1), logits.size((- 1))), labels.view((- 1)))
preds = torch.argmax(logits, dim=(- 1))
right = (torch.argmax(logits, dim=(- 1)) == labels).sum()
total = (labels >= 0).sum()
probs = torch.softmax(logits, dim=(- 1))
return (loss, preds, right, total, probs)
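# Training-step sketch (assumptions: an options namespace carrying the attributes read in
# __init__, a vocab_file pointing at a saved numpy embedding matrix, and a batch shaped
# as produced by Data.batchify for the tacred branch and moved to the GPU):
#
#   model = SENT_Model(options, vocab_file='data/word_emb.npy').cuda()  # path illustrative
#   (loss, preds, right, total, pos_right, label_probs, probs) = model(batch, negloss=True)
#   loss.backward()
#   optimizer.step()
#   optimizer.zero_grad()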
def load_data(args, mode='train'):
data_path = (((args.save_data_path + '.') + mode) + '.data')
if os.path.exists(data_path):
print('Loading {} data from {}...'.format(mode, data_path))
with open(data_path, 'rb') as f:
data = pickle.load(f)
else:
data = Data(args, mode)
print('Saving {} data to {}...'.format(mode, data_path))
with open(data_path, 'wb') as f:
pickle.dump(data, f)
return data
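# Usage sketch (the args namespace is whatever the training script's argparse builds; it
# must at least provide save_data_path plus the fields the Data class reads):
#
#   train_data = load_data(args, mode='train')
#   dev_data = load_data(args, mode='dev')
#
# The first call builds Data(args, mode) and pickles it next to save_data_path; later
# runs load the cached file instead of re-preprocessing.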