from nltk.corpus import LazyCorpusLoader
from nltk.corpus.reader import WordNetCorpusReader, CorpusReader
from nltk.corpus import wordnet as wn30, wordnet_ic
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import minmax_scale

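# NLTK's bundled `wordnet` corpus is WordNet 3.0 (aliased as wn30 above),
# while the Places365 labels below name WordNet 3.1 synsets, so 3.1 is
# loaded separately (assumes the "wordnet31" and "omw" corpora have been
# fetched via nltk.download()).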
wn31 = LazyCorpusLoader(
    "wordnet31",
    WordNetCorpusReader,
    LazyCorpusLoader("omw", CorpusReader, r".*/wn-data-.*\.tab", encoding="utf8"),
)

metrics = ['Path', 'Leacock-Chodorow', 'Wu-Palmer', 'Resnik', 'Jiang-Conrath', 'Lin']


def write_similarities():
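    """For every ImageNet class, find the most similar Places365 synset under
    each of the six metrics and write one semicolon-separated line per class
    to imagenet_places_similarities.txt.

    Assumed input formats: places365_labels.txt lines like
    "/a/airfield 0 airfield.n.01 ..." and LOC_synset_mapping.txt lines like
    "n01440764 tench, Tinca tinca".
    """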
    brown_ic = wordnet_ic.ic('ic-brown.dat')

    places_classes = []
    places_synsets = []

    with open('places365_labels.txt', "r") as f:
        for line in f:
            fields = line.split()
            _class = {
                'full_name': fields[0],
                # "/a/airfield" -> "airfield"
                'cleaned_name': fields[0].split('/')[2].replace("_", " "),
                'index': int(fields[1]),
                'synsets': [wn31.synset(name) for name in fields[2:]]
            }
            places_classes.append(_class)
            places_synsets += _class['synsets']

    with open("imagenet_places_similarities.txt", "w") as o:
        o.write("in_synset;in_label;"
                "Path;Path_synset;Leacock-Chodorow;LC_synset;"
                "Wu-Palmer;WP_synset;Resnik;Resnik_synset;"
                "Jiang-Conrath;JC_synset;Lin;Lin_synset\n")
        with open('LOC_synset_mapping.txt', "r") as f:
            for line in f:
                fields = line.split()
                synset_id = fields[0]

                # ImageNet synset ids look like "n01440764": a POS letter
                # followed by the WordNet 3.0 offset.
                pos = synset_id[0]
                offset = int(synset_id[1:])
                synset = wn30.synset_from_pos_and_offset(pos, offset)

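                # Path, Leacock-Chodorow and Wu-Palmer use only the hypernym
                # graph; Resnik, Jiang-Conrath and Lin also need an
                # information-content dict (Brown corpus counts here). Note
                # the Places synsets are WordNet 3.1 and the ImageNet synset
                # is WordNet 3.0, and all are assumed to be noun synsets.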
                similarities = {
                    'Path': [s.path_similarity(synset) for s in places_synsets],
                    'Leacock-Chodorow': [s.lch_similarity(synset) for s in places_synsets],
                    'Wu-Palmer': [s.wup_similarity(synset) for s in places_synsets],
                    'Resnik': [s.res_similarity(synset, brown_ic) for s in places_synsets],
                    'Jiang-Conrath': [s.jcn_similarity(synset, brown_ic) for s in places_synsets],
                    'Lin': [s.lin_similarity(synset, brown_ic) for s in places_synsets],
                }

                most_similar = {}
                for metric in similarities:
                    # Keep the highest-scoring Places synset under this metric.
                    val = max(similarities[metric])
                    idx = similarities[metric].index(val)
                    most_similar[metric] = {
                        'value': val,
                        'synset': places_synsets[idx]
                    }

                sims = ";".join([";".join([str(most_similar[metric]['value']),
                                           most_similar[metric]['synset'].name()])
                                 for metric in metrics])
                label = " ".join(fields[1:])
                output_line = ";".join([fields[0], label, sims])
                o.write(output_line + "\n")


def read_similarities():
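    """Parse imagenet_places_similarities.txt back into a list of
    (imagenet_label, {metric: {'value', 'synset'}}) tuples."""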
    res = []

    with open('imagenet_places_similarities.txt', "r") as f:
        # Skip the header line.
        for line in f.readlines()[1:]:
            # Strip the trailing newline so the last synset name parses cleanly.
            fields = line.rstrip("\n").split(";")
            obj = {}
            cnt = 2
            for m in metrics:
                obj[m] = {
                    'value': float(fields[cnt]),
                    'synset': wn31.synset(fields[cnt + 1])
                }
                cnt += 2

            res.append((fields[1], obj))

    return res


def draw_histogram(data, labels, name):
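    """Plot one histogram per column of `data`, titled from `labels`, and
    save the figure to `name`."""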
    fig, axs = plt.subplots(len(labels), 1, figsize=(6.4, 10))

    for i, l in enumerate(labels):
        axs[i].set_title(l)
        axs[i].hist(data[:, i], bins=30)

    fig.tight_layout()  # keep the stacked subplot titles from overlapping
    fig.show()
    fig.savefig(name)


def analysis(data):
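    """Draw histograms and a heatmap of the per-class similarities; return the
    min-max-scaled scores with two extra columns appended: the average over
    all six metrics and the average over the three non-IC metrics."""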
    similarity_arr = np.array([[c[1][m]['value'] for m in metrics] for c in data])

    draw_histogram(similarity_arr, metrics, "histogram")

    # The metrics live on different scales (e.g. Path in [0, 1], Resnik an
    # information-content value), so min-max scale each column before averaging.
    scaled_data = minmax_scale(similarity_arr, axis=0, feature_range=(0, 1))
    columns = metrics + ["Average", "Average (no ic)"]
    avg = np.average(scaled_data, axis=1).reshape(-1, 1)
    # "no ic": average only Path, Leacock-Chodorow and Wu-Palmer, the three
    # metrics that do not depend on the information-content dict.
    avg_no_ic = np.average(scaled_data[:, 0:3], axis=1).reshape(-1, 1)
    data_and_average = np.concatenate((scaled_data, avg, avg_no_ic), axis=1)

    draw_histogram(data_and_average, columns, "scaled_histogram")

    fig, ax = plt.subplots(figsize=(20, 180))
    im = ax.pcolormesh(data_and_average)
    fig.colorbar(im, ax=ax)
    ax.set_xticks(0.5 + np.arange(len(columns)))
    ax.set_yticks(0.5 + np.arange(len(data)))
    ax.set_xticklabels(labels=columns, rotation=90)
    ax.set_yticklabels(labels=[c[0].split(",")[0] for c in data])

    for y in range(len(data)):
        # Annotate each metric cell with the name of the winning Places
        # synset, and the final column with the "no ic" average.
        for x in range(len(metrics)):
            ax.text(x + 0.1, y + 0.2, data[y][1][metrics[x]]['synset'].name(), color="grey")
        ax.text(len(columns) - 1 + 0.1, y + 0.2, "%.2f" % data_and_average[y, -1], color="grey")

    fig.show()
    fig.savefig("heatmap")

    return data_and_average


def write_output(similarities):
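    """Write one "OOD-ness" score (1 - scaled similarity) per class."""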
    with open("in_wordnet_oodness.txt", "w") as f:
        for s in similarities:
            f.write("%.2f\n" % (1 - s))


def main():
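    # read_similarities() expects imagenet_places_similarities.txt to exist;
    # run write_similarities() once first to generate it.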
    scores = read_similarities()
    res = analysis(scores)
    # The last column of `res` is the scaled "Average (no ic)" score.
    write_output(res[:, -1])


if __name__ == "__main__":
    main()