2018-10-03
Connector identifiers changed (2018-09-26): C01 ⇒ B/AB/AAB...
.dict file format updated to Link Grammar 5.5.1 -- Local ULL-DEV environment.
Each line is calculated once; parsing metrics are tested once for each calculation.
The calculation table is shared as 'short_table.txt' in data folder
http://langlearn.singularitynet.io/data/clustering_2018/POC-English-NoAmb-2018-10-03/
This notebook is shared as static html via
http://langlearn.singularitynet.io/data/clustering_2018/html/POC-English-NoAmb-2018-10-03.html
# Standard-library imports and the notebook display helper.
import os, sys, time
from IPython.display import display
# Add the project root (parent directory) to sys.path so 'src.*' imports resolve.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
# Grammar-Learner components: timestamp helper, directory check, HTML table
# widget, pipeline runner (one table row per test line), and file writer.
from src.grammar_learner.utl import UTC
from src.grammar_learner.read_files import check_dir
from src.grammar_learner.widgets import html_table
from src.grammar_learner.pqa_table import table_row # updated, incl. silhouette index
from src.grammar_learner.write_files import list2file
# Working directories: temporary files under <project>/tmp,
# results under <project>/output/<corpus>-<date>.
tmpath = module_path + '/tmp/'
check_dir(tmpath, True, 'none')
# Accumulators: 'table' holds averaged results (one row per test line),
# 'long_table' holds one row per individual run.
table = []
long_table = []
header = ['Line', 'Corpus', 'Parsing', 'LW', 'RW', 'Gen.', 'Space',
          'Rules', 'Silhouette', 'PA', 'PQ']
start = time.time()
print(UTC(), ':: module_path =', module_path)
corpus = 'POC-English-NoAmb'
out_dir = module_path + '/output/' + corpus + '-' + str(UTC())[:10]
# runs = (attempts to learn grammar per line, grammar tests per attempt);
# anything other than a single run gets its own '-multi' output directory.
runs = (1, 1)
if runs != (1, 1):
    out_dir += '-multi'
# Grammar-Learner configuration shared by every cell below; individual cells
# override 'context', 'word_space', 'clustering' and 'grammar_rules'.
kwargs = dict(
    left_wall='',
    period=False,
    word_space='vectors',
    cluster_range=(50, 2, 9),   # (2,30,1,5) - 5 is not enough
    clustering=('kmeans', 'kmeans++', 18),
    tmpath=tmpath,
    verbose='max',
    template_path='poc-turtle',
    linkage_limit=1000,
    categories_generalization='off',
)
# Test matrix: each input-parses variant is run twice -- once with LEFT-WALL
# and periods ('LW', '.') and once without (0, 0).  Line numbers 14..23
# match the rows of the published calculation table.
lines = [
    [14 + 2 * i + j, 'POC-English-NoAmb', parsing, lw, rw, 'none']
    for i, parsing in enumerate([
        'MST-fixed-manually',
        'LG-English',
        'R=6-Weight=6:R-mst-weight=+1:R',
        'R=6-Weight=1-mst-weight=+1:R',
        'LG-ANY-mst-weight=+1:R'])
    for j, (lw, rw) in enumerate([('LW', '.'), (0, 0)])]
# cp, rp :: (test) corpus path and gold-standard reference path
data_dir = module_path + '/data/POC-English-NoAmb/'
cp = data_dir + 'poc_english_noamb_corpus.txt'
rp = data_dir + 'poc-english_noAmb-parses-gold.txt'
# cp = rp  # July 2018 Grammar Tester option
%%capture
kwargs['context'] = 1
kwargs['word_space'] = 'vectors'
kwargs['clustering'] = ('kmeans','kmeans++',18)
#kwargs['cluster_range'] = (50,2,9) # thorough clustering search for max silhouette index
kwargs['grammar_rules'] = 1
average21, long21 = table_row(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average21)
long_table.extend(long21)
display(html_table([header]+average21))
%%capture
kwargs['grammar_rules'] = 2
average22, long22 = table_row(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average22)
long_table.extend(long22)
display(html_table([header]+average22))
%%capture
kwargs['context'] = 2
average23, long23 = table_row(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average23)
long_table.extend(long23)
display(html_table([header]+average23))
%%capture
kwargs['word_space'] = 'discrete'
kwargs['clustering'] = 'group'
average24, long24 = table_row(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
# Reconstructed from a garbled export ('runs > (1.1)' / ')¶display(...)'):
# show the detailed per-run table only when multiple runs per line were
# requested; with runs == (1, 1) it would just duplicate the short table.
if runs > (1, 1):
    display(html_table([header] + long_table))
# Report elapsed time and save the result tables.  Fixes: the return values
# of list2file were bound to unused locals (table_str / long_table_str) --
# dropped; indentation of the if/else bodies (lost in the export) restored.
print(UTC(), ':: finished, elapsed', str(round((time.time()-start)/60, 1)), 'min')
list2file(table, out_dir + '/short_table.txt')  # averaged results, always saved
if runs == (1, 1):
    print('Results saved to', out_dir + '/short_table.txt')
else:
    # Multi-run mode: also save one row per individual run.
    list2file(long_table, out_dir + '/long_table.txt')
    print('Average results saved to', out_dir + '/short_table.txt\n'
          'Detailed results for every run saved to', out_dir + '/long_table.txt')