2018-08-09
Updated optimal clustering search (2018-08-05), server 88.99.210.144
Each line is calculated once; parsing metrics are tested once for each calculation.
The calculation table is shared as 'short_table.txt' in the data folder:
http://langlearn.singularitynet.io/data/clustering_2018/POC-English-Amb-2018-08-09/
This notebook is shared as static HTML via
http://langlearn.singularitynet.io/data/clustering_2018/html/POC-English-Amb-2018-08-09.html
A multi-run version of this test is shared via
http://langlearn.singularitynet.io/data/clustering_2018/html/POC-English-Amb-2018-08-08-multi.html
import os, sys, time
from IPython.display import display
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
grammar_learner_path = module_path + '/src/grammar_learner/'
if grammar_learner_path not in sys.path: sys.path.append(grammar_learner_path)
from utl import UTC
from read_files import check_dir
from widgets import html_table
from pqa_table import table_damb
tmpath = module_path + '/tmp/'
if check_dir(tmpath, True, 'none'):
    table = []
    long_table = []
    header = ['Line','Corpus','Parsing','LW','"."','Generalization','Space','Rules','PA','PQ']
    start = time.time()
    print(UTC(), ':: module_path =', module_path)
else: print(UTC(), ':: could not create temporary files directory', tmpath)
corpus = 'POC-English-Amb'
out_dir = module_path + '/output/'+ corpus + '-' + str(UTC())[:10]
runs = (1,1) # (attempts to learn grammar per line, grammar tests per attempt)
if runs != (1,1): out_dir += '-multi'
kwargs = {
    'left_wall'     : ''                         ,
    'period'        : False                      ,
    'word_space'    : 'vectors'                  ,
    'clustering'    : ('kmeans', 'kmeans++', 10) ,
    'cluster_range' : (2, 50, 3)                 ,
    'tmpath'        : tmpath                     ,
    'verbose'       : 'min'                      ,
    'template_path' : 'poc-turtle'               ,
    'linkage_limit' : 1000                       ,
    'categories_generalization': 'off'}
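For orientation: 'clustering': ('kmeans', 'kmeans++', 10) sets the algorithm, the initialization and n_init, while 'cluster_range': (2, 50, 3) asks the search to try 2..50 clusters with 3 attempts each. The sketch below shows one way such a search could pick a cluster count by mean silhouette score; best_k, the silhouette criterion and the use of scikit-learn are illustrative assumptions here, not the grammar_learner internals.
# Illustrative sketch only: a cluster-count search in the spirit of
# cluster_range = (2, 50, 3) with ('kmeans', 'kmeans++', 10) settings.
# The actual grammar_learner search may differ.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def best_k(vectors, k_min=2, k_max=50, attempts=3, n_init=10):
    """Return the cluster count with the highest mean silhouette score."""
    scores = {}
    for k in range(k_min, min(k_max, len(vectors) - 1) + 1):
        trials = []
        for seed in range(attempts):   # several attempts per k, averaged
            labels = KMeans(n_clusters=k, init='k-means++', n_init=n_init,
                            random_state=seed).fit_predict(vectors)
            trials.append(silhouette_score(vectors, labels))
        scores[k] = np.mean(trials)
    return max(scores, key=scores.get)

rng = np.random.default_rng(0)         # toy word vectors for a demo run
print('optimal clusters:', best_k(rng.normal(size=(60, 8)), k_max=10))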
lines = [
    [26, 'POC-English-Amb'   , 'MST-fixed-manually'                    , 'LW', '.', 'none' ],
    [27, 'POC-English-Amb'   , 'LG-English'                            , 'LW', '.', 'none' ],
    [28, 'POC-English-Amb'   , 'R=6-Weight=6:R-mst-weight=+1:R'        , 'LW', '.', 'none' ],
    [29, 'POC-English-Amb'   , 'MST-fixed-manually'                    ,  0  ,  0 , 'none' ],
    [30, 'POC-English-Amb'   , 'MST-fixed-manually'                    ,  0  ,  0 , 'rules'],
    [31, 'POC-English-Amb'   , 'LG-English'                            ,  0  ,  0 , 'none' ],
    [32, 'POC-English-Amb'   , 'LG-English'                            ,  0  ,  0 , 'rules'],
    [33, 'POC-English-Amb'   , 'R=6-Weight=6:R-mst-weight=+1:R'        ,  0  ,  0 , 'none' ],
    [34, 'POC-English-Amb'   , 'R=6-Weight=6:R-mst-weight=+1:R'        ,  0  ,  0 , 'rules'],
    [35, 'POC-English-disAmb', 'R=6-Weight=6:R-mst-weight=+1:R'        ,  0  ,  0 , 'none' ],
    [36, 'POC-English-disAmb', 'R=6-Weight=6:R-mst-weight=+1:R'        ,  0  ,  0 , 'rules'],
    [37, 'POC-English-disAmb', 'R=6-Weight=6:R-mst-weight=+1:R-adagram',  0  ,  0 , 'rules'],
    [38, 'POC-English-Amb'   , 'R=6-Weight=1-mst-weight=+1:R'          ,  0  ,  0 , 'none' ],
    [39, 'POC-English-disAmb', 'R=6-Weight=1-mst-weight=+1:R'          ,  0  ,  0 , 'none' ],
    [40, 'POC-English-disAmb', 'R=6-Weight=1-mst-weight=+1:R'          ,  0  ,  0 , 'rules'],
    [41, 'POC-English-disAmb', 'R=6-Weight=1-mst-weight=+1:R-adagram'  ,  0  ,  0 , 'rules'],
    [42, 'POC-English-Amb'   , 'LG-ANY-all-parses'                     ,  0  ,  0 , 'none' ],
    [43, 'POC-English-disAmb', 'LG-ANY-all-parses'                     ,  0  ,  0 , 'none' ],
    [44, 'POC-English-disAmb', 'LG-ANY-all-parses'                     ,  0  ,  0 , 'rules'],
    [45, 'POC-English-disAmb', 'LG-ANY-all-parses-adagram'             ,  0  ,  0 , 'rules']]
rp = module_path + '/data/POC-English-Amb/MST-fixed-manually/poc-english_ex-parses-gold.txt'
cps = (rp, rp)  # test corpus paths for the Amb and disAmb corpora
rps = (rp, rp)  # reference paths for the Amb and disAmb corpora
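Before a long run it can help to eyeball the test plan; the preview below just reuses the html_table widget imported above. The name plan_header is hypothetical and simply mirrors the first six columns of the results header defined earlier.
# Optional preview of the test plan via the widgets.html_table helper.
plan_header = ['Line', 'Corpus', 'Parsing', 'LW', '"."', 'Generalization']
display(html_table([plan_header] + lines))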
ULL Project Plan ⇒ Parses ⇒ lines 26-45
%%capture
kwargs['context'] = 1        # context = 1: connector-based word space
kwargs['grammar_rules'] = 1  # grammar_rules = 1: connector-based rules
average21, long21 = table_damb(lines, out_dir, cps, rps, runs, **kwargs)
table.extend(average21)
long_table.extend(long21)
display(html_table([header]+average21))
print(UTC())
%%capture
kwargs['grammar_rules'] = 2  # grammar_rules = 2: disjunct-based rules
average22, long22 = table_damb(lines, out_dir, cps, rps, runs, **kwargs)
table.extend(average22)
long_table.extend(long22)
display(html_table([header]+average22))
print(UTC())
%%capture
kwargs['context'] = 2  # context = 2: disjunct-based word space
average23, long23 = table_damb(lines, out_dir, cps, rps, runs, **kwargs)
table.extend(average23)
long_table.extend(long23)
display(html_table([header]+average23))
print(UTC())
%%capture
kwargs['word_space'] = 'discrete'  # discrete word space instead of vectors
kwargs['clustering'] = 'group'     # group identical entries, no cluster-count search
average24, long24 = table_damb(lines, out_dir, cps, rps, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
print(UTC())
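The last cell switches to a discrete word space with 'group' clustering, which needs no cluster-count search. As a rough illustration of the idea (an assumption for clarity, not the grammar_learner code), grouping in a discrete space can be as simple as putting words with identical context sets into one category:
# Illustrative sketch, not the grammar_learner 'group' clustering itself:
# in a discrete word space, words sharing exactly the same contexts
# collapse into one category, so there is no k to search for.
from collections import defaultdict

def group_by_contexts(word_contexts):
    """Group words whose context sets are identical."""
    groups = defaultdict(list)
    for word, contexts in word_contexts.items():
        groups[frozenset(contexts)].append(word)
    return list(groups.values())

demo = {'mom': {'loves+', 'is+'}, 'dad': {'loves+', 'is+'}, 'cake': {'+loves', '+eats'}}
print(group_by_contexts(demo))   # [['mom', 'dad'], ['cake']]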
if runs > (1,1):
    display(html_table([header] + long_table))
from write_files import list2file
print(UTC(), ':: finished, elapsed', str(round((time.time()-start)/3600, 1)), 'hours')
table_str = list2file(table, out_dir+'/short_table.txt')
if runs == (1,1):
    print('Results saved to', out_dir + '/short_table.txt')
else:
    long_table_str = list2file(long_table, out_dir + '/long_table.txt')
    print('Average results saved to', out_dir + '/short_table.txt\n'
          'Detailed results for every run saved to', out_dir + '/long_table.txt')
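As a quick sanity check, the saved file can be read straight back; this assumes list2file writes plain text to the given path, which the calls above suggest.
# Optional read-back of the saved results table (plain-text assumption).
with open(out_dir + '/short_table.txt') as f:
    print(f.read())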