2018-11-23_
Link Grammar 5.4.4, test_grammar
updated 2018-10-19.
This notebook is shared as static Grammar-Rules-Generalization-2018-11-23_.html
Test results table is saved as table.txt
in the clustering_2018/Grammar-Rules-Generalization-2018-11-23_
folder; output data are saved in the relevant subfolders of that folder.
import os, sys, time
from collections import OrderedDict, Counter
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
from src.grammar_learner.utl import UTC
from src.grammar_learner.read_files import check_dir
from src.grammar_learner.write_files import list2file
from src.grammar_learner.widgets import html_table
from src.grammar_learner.pqa_table import table_rows, wide_rows
# Scratch directory for intermediate Grammar Learner artifacts.
tmpath = module_path + '/tmp/'
check_dir(tmpath, True, 'none')

table = []            # accumulates result rows across all test cells below
start = time.time()   # wall-clock anchor for the final elapsed-time report
print(UTC(), ':: module_path =', module_path)

# Date-stamped output directory, e.g. .../Grammar-Rules-Generalization-2018-11-23_
out_dir = module_path + '/output/Grammar-Rules-Generalization-' + str(UTC())[:10] + '_'

corpus = 'CDS-br-text' # 'CDS-caps-br-text' shortened -- 2018-11-22
dataset = 'LG-E-clean' # 2018-10-29: only 100% parsed, shorter names:
# dataset = 'LG-English'
# dataset = 'MST_6:R+1:R' # shorter name in 'CDS-br-text'

# One test row per rule-generalization mode; column 0 holds a run label
# that the grid-search cells overwrite before each run.
lines = [[row, corpus, dataset, 0, 0, mode]
         for row, mode in enumerate(['none', 'rules', 'updated', 'new'])]

rp = module_path + '/data/CDS-br-text/LG-E-clean' # "clean-clean" renamed
cp = rp # corpus path = reference_path :: use 'gold' parses as test corpus
runs = (1,1)
# Grammar Learner / parse-ability pipeline parameters.  The test cells
# below override 'clustering', 'cluster_range', and 'rules_aggregation'
# before each wide_rows call; the rest stay fixed for the whole notebook.
kwargs = dict(
    left_wall='',
    period=False,
    context=2,
    word_space='sparse',
    clustering=['agglomerative', 'ward', 'euclidean'],
    cluster_range=400,
    clustering_metric=['silhouette', 'cosine'],
    grammar_rules=2,
    rules_merge=0.8,         # grammar rules merge threshold
    rules_aggregation=0.2,   # grammar rules aggregation threshold
    top_level=0.01,          # top-level rules generalization threshold
    tmpath=tmpath,
    verbose='min',
    template_path='poc-turtle',
    linkage_limit=1000,
)
%%capture
# Baseline run (cell output captured): only rows 0 ('none') and 3 ('new')
# with ward/euclidean agglomerative clustering at 400 clusters and a
# rules_aggregation threshold loosened to 0.1.
kwargs['clustering'] = ['agglomerative', 'ward', 'euclidean']
kwargs['cluster_range'] = 400
kwargs['rules_aggregation'] = 0.1 # default 0.2
a0, _, header, log0, rules0 = wide_rows([lines[0], lines[3]], out_dir, cp, rp, runs, **kwargs)
# NOTE(review): the same table is displayed twice — presumably an artifact
# of flattening two notebook cells into one; confirm before removing either.
display(html_table([header] + a0))
display(html_table([header] + a0))
3 levels of generalization:
Column "G12n" (Generalization) describes the levels 2 and 3 agglomeration:
%%capture
# All four generalization modes ('none', 'rules', 'updated', 'new' in `lines`)
# with the baseline ward/euclidean setup.
kwargs['clustering'] = ['agglomerative', 'ward', 'euclidean']
kwargs['rules_aggregation'] = 0.1 # default 0.2
a1, _, header, log1, rules1 = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
display(html_table([header] + a1))
%%capture
# Complete-linkage / manhattan variant (rules_aggregation stays at 0.1
# from the previous cell).
kwargs['clustering'] = ['agglomerative', 'complete', 'manhattan']
a2, _, header, log2, rules2 = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
display(html_table([header] + a2))
%%capture
# Complete-linkage / cosine variant at rules_aggregation = 0.1.
kwargs['rules_aggregation'] = 0.1
kwargs['clustering'] = ['agglomerative', 'complete', 'cosine']
a3, _, header, log3, rules3 = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
display(html_table([header] + a3))
%%capture
# Average-linkage / cosine variant; threshold tightened to 0.05 because
# the 0.1 run produced no generalization.
kwargs['rules_aggregation'] = 0.05 # no generalization with 0.1 similarity!
kwargs['clustering'] = ['agglomerative', 'average', 'cosine']
a4, _, header, log4, rules4 = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
display(html_table([header] + a4))
%%capture
# Grid search over linkage x affinity at rules_aggregation = 0.1,
# cluster_range = 400.  Run labels are n.m (n = linkage index,
# m = generalization-row index within that linkage/affinity pair).
kwargs['rules_aggregation'] = 0.1
t1 = []  # rows for this grid only; also appended to the global `table`
n = 0    # linkage counter for the n.m run labels
for linkage in ['ward', 'complete', 'average']:
    n += 1
    m = 0  # per-row sub-counter, advanced before each labeled row
    for affinity in ['euclidean', 'manhattan', 'cosine']:
        # ward linkage is only paired with euclidean here; skip the others.
        if linkage == 'ward' and affinity != 'euclidean': continue
        # m += 1
        # lines[0][0] = round(n + 0.1*m, 1)
        lines[0][0] = ''  # baseline ('none') row gets a blank label
        m += 1
        lines[1][0] = round(n + 0.1*m, 1)
        m += 1
        lines[2][0] = round(n + 0.1*m, 1)
        m += 1
        lines[3][0] = round(n + 0.1*m, 1)
        kwargs['clustering'] = ['agglomerative', linkage, affinity]
        a, _, header, log, _ = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
        t1.extend(a)
        table.extend(a)
display(html_table([header] + t1))
%%capture
# Same linkage x affinity grid with a tighter rules_aggregation = 0.05.
kwargs['rules_aggregation'] = 0.05
t2 = []
# n = 0
# NOTE(review): `n` is not reset here, so run labels continue from the
# previous grid (4.x-6.x) -- presumably intentional for the combined
# table at the end; confirm.
for linkage in ['ward', 'complete', 'average']:
    n += 1
    m = 0
    for affinity in ['euclidean', 'manhattan', 'cosine']:
        # ward linkage is only paired with euclidean here; skip the others.
        if linkage == 'ward' and affinity != 'euclidean': continue
        # m += 1
        # lines[0][0] = round(n + 0.1*m, 1)
        lines[0][0] = ''  # baseline ('none') row gets a blank label
        m += 1
        lines[1][0] = round(n + 0.1*m, 1)
        m += 1
        lines[2][0] = round(n + 0.1*m, 1)
        m += 1
        lines[3][0] = round(n + 0.1*m, 1)
        kwargs['clustering'] = ['agglomerative', linkage, affinity]
        a, _, header, log, _ = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
        t2.extend(a)
        table.extend(a)
display(html_table([header] + t2))
%%capture
# Third grid: smaller cluster_range = 200, rules_aggregation back to 0.1.
# `n` again continues from the previous grids (labels 7.x-9.x).
kwargs['cluster_range'] = 200
kwargs['rules_aggregation'] = 0.1
t4 = []
# n = 0
for linkage in ['ward', 'complete', 'average']:
    n += 1
    m = 0
    for affinity in ['euclidean', 'manhattan', 'cosine']:
        # ward linkage is only paired with euclidean here; skip the others.
        if linkage == 'ward' and affinity != 'euclidean': continue
        lines[0][0] = ''  # baseline ('none') row gets a blank label
        m += 1
        lines[1][0] = round(n + 0.1*m, 1)
        m += 1
        lines[2][0] = round(n + 0.1*m, 1)
        m += 1
        lines[3][0] = round(n + 0.1*m, 1)
        kwargs['clustering'] = ['agglomerative', linkage, affinity]
        a, _, header, log, _ = wide_rows(lines, out_dir, cp, rp, runs, **kwargs)
        t4.extend(a)
        table.extend(a)
display(html_table([header] + t4))
# Combined table of all grid-search rows, total run time, and persistence
# of the results table to the output directory.
display(html_table([header] + table))
print(UTC(), ':: finished, elapsed', str(round((time.time()-start)/3600.0, 1)), 'hours')
table_str = list2file(table, out_dir + '/table.txt')
print('Results saved to', out_dir + '/table.txt')