2018-12-31
Re-generate tests (with tree production) for POC-English & CDS for all baseline parse & WSD variants
3-level category tree, Link Grammar 5.5.1, test_grammar
updated 2018-10-19, server 94.130.238.118
This notebook is shared as static POC-English-2018-12-31.html;
the output data are shared via the POC-English-2018-12-31 directory.
Previous (reference) tests:
POC-English-2018-10-23.html,
POC-English-Amb-2018-10-21.html,
POC-English-Amb-2018-08-09.html
import os, sys, time
module_path = os.path.abspath(os.path.join('..'))    # repository root
if module_path not in sys.path: sys.path.append(module_path)
from src.grammar_learner.utl import UTC
from src.grammar_learner.read_files import check_dir
from src.grammar_learner.write_files import list2file
from src.grammar_learner.widgets import html_table
from src.grammar_learner.pqa_table import table_rows
tmpath = module_path + '/tmp/'
check_dir(tmpath, True, 'none')    # create the tmp directory if it does not exist
table = []         # short table: averaged results
long_table = []    # long table: results of every individual run
start = time.time()
print(UTC(), ':: module_path =', module_path)
corpus = 'POC-English-Amb'
out_dir = module_path + '/output/POC-English-' + str(UTC())[:10]    # dated output directory
runs = (1,1)    # repetitions: (Grammar Learner runs, tests per run); (1,1) ⇒ single run
if runs != (1,1): out_dir += '-multi'
kwargs = {
    'min_word_count':   1 ,         # SVS (sparse vector space) parameter
    'min_link_count':   1 ,         # SVS
    'max_words'     :   100000 ,    # SVS: max space dimension 1: words
    'max_features'  :   100000 ,    # SVS: dimension 2: disjuncts/connectors
    'min_co-occurrence_count': 1 ,  # SVS
    'left_wall'     :   '' ,        # no LEFT-WALL token
    'period'        :   False ,     # ignore sentence-final periods
    'word_space'    :   'vectors' , # word space model (overridden per test below)
    'clustering'    :   ('kmeans', 'kmeans++', 10),
    'cluster_range' :   (2,50,1,5) ,    # cluster number search range
    'cluster_criteria': 'silhouette',
    'clustering_metric': ('silhouette', 'cosine'),
    'rules_merge'   :   0.8 ,       # grammar rules merge threshold
    'rules_aggregation': 0.2 ,      # grammar rules aggregation threshold
    'top_level'     :   0.01 ,      # top-level rules generalization threshold
    'tmpath'        :   tmpath ,
    'verbose'       :   'min' ,     # minimal console output
    'template_path' :   'poc-turtle',   # Link Grammar dictionary template
    'linkage_limit' :   1000 ,      # Link Grammar parser linkage limit
    'categories_generalization': 'off'}
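Note: the test cells below mutate this shared kwargs dict in place before each table_rows call, so settings carry over from one cell to the next. A minimal sketch of an isolated override (the probe name and its values are hypothetical, not part of the test matrix below):
probe = dict(kwargs)                    # shallow copy of the baseline settings
probe['cluster_range'] = (2, 20, 1, 1)  # hypothetical narrower cluster search
# probe could then be passed to table_rows in place of kwargs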
# Test lines: [row id, corpus, input parses, left_wall, period, generalization]
lines = [
[11, 'POC-English-Amb' , 'MST-fixed-manually' , 0 , 0 , 'none' ],
[12, 'POC-English-Amb' , 'MST-fixed-manually' , 0 , 0 , 'rules' ],
[13, 'POC-English-Amb' , 'LG-English' , 0 , 0 , 'none' ],
[14, 'POC-English-Amb' , 'LG-English' , 0 , 0 , 'rules' ],
[15, 'POC-English-Amb' , 'R=6-Weight=6:R-mst-weight=+1:R' , 0 , 0 , 'none' ],
[16, 'POC-English-Amb' , 'R=6-Weight=6:R-mst-weight=+1:R' , 0 , 0 , 'rules' ],
[17, 'POC-English-disAmb' , 'R=6-Weight=6:R-mst-weight=+1:R' , 0 , 0 , 'none' ],
[18, 'POC-English-disAmb' , 'R=6-Weight=6:R-mst-weight=+1:R' , 0 , 0 , 'rules' ],
[19, 'POC-English-disAmb' , 'R=6-Weight=6:R-mst-weight=+1:R-agm-opt' , 0 , 0 , 'none' ],
[20, 'POC-English-disAmb' , 'R=6-Weight=6:R-mst-weight=+1:R-agm-opt' , 0 , 0 , 'rules' ],
[21, 'POC-English-disAmb' , 'R=6-Weight=6:R-mst-weight=+1:R-agm-100' , 0 , 0 , 'none' ],
[22, 'POC-English-disAmb' , 'R=6-Weight=6:R-mst-weight=+1:R-agm-100' , 0 , 0 , 'rules' ],
[23, 'POC-English-Amb' , 'R=6-Weight=1-mst-weight=+1:R' , 0 , 0 , 'none' ],
[24, 'POC-English-disAmb' , 'R=6-Weight=1-mst-weight=+1:R' , 0 , 0 , 'none' ],
[25, 'POC-English-disAmb' , 'R=6-Weight=1-mst-weight=+1:R-agm-opt' , 0 , 0 , 'none' ],
[26, 'POC-English-disAmb' , 'R=6-Weight=1-mst-weight=+1:R-agm-100' , 0 , 0 , 'none' ],
[27, 'POC-English-Amb' , 'LG-ANY-all-parses' , 0 , 0 , 'none' ],
[28, 'POC-English-disAmb' , 'LG-ANY-all-parses' , 0 , 0 , 'none' ],
[29, 'POC-English-disAmb' , 'LG-ANY-all-parses-agm-opt' , 0 , 0 , 'none' ],
[30, 'POC-English-disAmb' , 'LG-ANY-all-parses-agm-100' , 0 , 0 , 'none' ]]
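# Sanity check: view the first test lines with the column names noted above
# (names are assumptions for readability, not identifiers used by table_rows):
cols = ('row', 'corpus', 'input_parses', 'left_wall', 'period', 'generalization')
for row in lines[:2]:
    print(dict(zip(cols, row)))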
# gold-standard reference parses for parse-ability / parse-quality evaluation
rp = module_path + '/data/POC-English-Amb/MST-fixed-manually/poc-english_ex-parses-gold.txt'
cp = rp # corpus path = reference path
%%capture
# Test 2.1: word space and grammar rules built from connectors,
# vector word space clustered with k-means
kwargs['context'] = 1               # 1: connector-based word context
kwargs['grammar_rules'] = 1         # 1: induce rules as connectors
kwargs['word_space'] = 'vectors'
kwargs['clustering'] = ('kmeans', 'kmeans++', 10)
average21, long21, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average21)
long_table.extend(long21)
display(html_table([header]+average21))
%%capture
kwargs['grammar_rules'] = 2         # Test 2.2: induce rules as disjuncts
average22, long22, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average22)
long_table.extend(long22)
display(html_table([header]+average22))
%%capture
kwargs['context'] = 2               # Test 2.3: disjunct-based word context
average23, long23, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average23)
long_table.extend(long23)
display(html_table([header]+average23))
%%capture
# Test 2.4: discrete word space, identical lexical entries grouped into clusters
kwargs['context'] = 2
kwargs['word_space'] = 'discrete'
kwargs['clustering'] = 'group'
kwargs['grammar_rules'] = 2
average24, long24, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
%%capture
# Test 2.5: sparse word space, agglomerative (Ward) clustering
kwargs['word_space'] = 'sparse'
kwargs['cluster_range'] = (2,36,1,1)
kwargs['clustering'] = ('agglomerative', 'ward')
kwargs['clustering_metric'] = ('silhouette', 'cosine')
average25, long25, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average25)
long_table.extend(long25)
display(html_table([header]+average25))
display(html_table([header]+long_table))
print(UTC(), ':: finished, elapsed', str(round((time.time()-start)/3600, 1)), 'hours')
table_str = list2file(table, out_dir + '/short_table.txt')
if runs == (1,1):
    print('Results saved to', out_dir + '/short_table.txt')
else:
    long_table_str = list2file(long_table, out_dir + '/long_table.txt')
    print('Average results saved to', out_dir + '/short_table.txt\n'
          'Detailed results for every run saved to', out_dir + '/long_table.txt')
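The saved tables can be reloaded for comparison across sessions; a minimal sketch, assuming list2file writes plain text with one table row per line and whitespace-separated cells:
with open(out_dir + '/short_table.txt') as f:    # assumption: plain-text rows
    rows = [line.split() for line in f if line.strip()]
print(len(rows), 'result rows reloaded from short_table.txt')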