2018-08-28
Updated (2018-08-14) Grammar Tester, server 94.130.238.118
Each line of the tables below is calculated once, and parsing metrics are tested once for each calculation.
The calculation table is shared as 'short_table.txt' in the data folder:
http://langlearn.singularitynet.io/data/clustering_2018/Random-Clusters-CDS-2018-08-28/
This notebook is shared as static HTML via
http://langlearn.singularitynet.io/data/clustering_2018/html/Random-Clusters-CDS-2018-08-28.html
The consistency test (a multi-run version of this notebook) is shared via
http://langlearn.singularitynet.io/data/clustering_2018/html/Random-Clusters-CDS-2018-08-28-multi.html
import os, sys, time
from IPython.display import display
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
grammar_learner_path = module_path + '/src/grammar_learner/'
if grammar_learner_path not in sys.path: sys.path.append(grammar_learner_path)
from utl import UTC
from read_files import check_dir
from widgets import html_table
from pqa_table import table_cds
tmpath = module_path + '/tmp/'
if check_dir(tmpath, True, 'none'):
    table = []
    long_table = []
    # PA: parse-ability, PQ: parse quality; LW: left wall, '"."': period
    header = ['Line', 'Corpus', 'Parsing', 'LW', '"."', 'Generalization',
              'Space', 'Rules', 'PA', 'PQ']
    start = time.time()
    print(UTC(), ':: module_path =', module_path)
else:
    print(UTC(), ':: could not create temporary files directory', tmpath)
out_dir = module_path + '/output/Random-Clusters-' + str(UTC())[:10] + '-50-clusters'
runs = (1,1) # (attempts to learn grammar per line, grammar tests per attempt)
if runs != (1,1): out_dir += '-multi'
kwargs = {
    'left_wall'       : ''            ,
    'period'          : False         ,
    'clustering'      : ('kmeans', 'kmeans++', 10),
    'cluster_range'   : (50, 50, 3)   ,  # (max, min, repeats)
    'cluster_criteria': 'silhouette'  ,
    'cluster_level'   : 1             ,
    'tmpath'          : tmpath        ,
    'verbose'         : 'min'         ,
    'template_path'   : 'poc-turtle'  ,
    'linkage_limit'   : 1000          ,
    'categories_generalization': 'off'}
lines = [
    [58, 'CDS-caps-br-text+brent9mos', 'LG-English'                    , 0, 0, 'none'],
    [60, 'CDS-caps-br-text+brent9mos', 'R=6-Weight=6:R-mst-weight=+1:R', 0, 0, 'none']]
rp = module_path + '/data/CDS-caps-br-text+brent9mos/LG-English'
cp = rp # corpus path = reference_path :: use 'gold' parses as test corpus
"Connector-based rules" style interconnection:
C01: {C01C01- or C02C01- or ... or CnC01-} and {C01C01+ or C01C02+ or ... or C01Cn+} ...
Cxx: {C01Cxx- or C02Cxx- or ... or CnCxx-} and {CxxC01+ or CxxC02+ or ... or CxxCn+} ...
where n is the number of clusters, Cn is the n-th cluster, and Cxx is the x-th cluster in {C01 ... Cn}.
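As an illustration, a minimal sketch of how such an "and of ors" rule string could be generated. The function name connector_rule and the zero-padded cluster labels are assumptions for this example, not the Grammar Learner API:
def connector_rule(x, n):
    """Build the connector-based rule for cluster x of n (illustration only)."""
    cx = 'C' + str(x).zfill(2)                        # cluster label, e.g. 'C02'
    cs = ['C' + str(i).zfill(2) for i in range(1, n + 1)]
    lefts  = ' or '.join(c + cx + '-' for c in cs)    # incoming (left) connectors
    rights = ' or '.join(cx + c + '+' for c in cs)    # outgoing (right) connectors
    return cx + ': {' + lefts + '} and {' + rights + '};'

print(connector_rule(2, 3))
# C02: {C01C02- or C02C02- or C03C02-} and {C02C01+ or C02C02+ or C02C03+};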
kwargs['context'] = 1
kwargs['word_space'] = 'none'
kwargs['clustering'] = 'random'
%%capture
kwargs['grammar_rules'] = -1
average21, long21 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average21)
long_table.extend(long21)
display(html_table([header]+average21))
print(UTC())
%%capture
kwargs['grammar_rules'] = 1
average22, long22 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average22)
long_table.extend(long22)
display(html_table([header]+average22))
print(UTC())
%%capture
kwargs['grammar_rules'] = 2
average23, long23 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average23)
long_table.extend(long23)
display(html_table([header]+average23))
print(UTC())
Every cluster is linked to all clusters with single-link disjuncts:
C01: (C01C01-) or (C02C01-) or ... (CnC01-) or (C01C01+) or (C01C02+) or ... (C01Cn+) ...
Cxx: (C01Cxx-) or (C02Cxx-) or ... (CnCxx-) or (CxxC01+) or (CxxC02+) or ... (CxxCn+) ...
where n is the number of clusters, Cn is the n-th cluster, and Cxx is the x-th cluster in {C01 ... Cn}.
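A matching sketch for the single-link case (again an illustration with an assumed function name, not the Grammar Learner API): each disjunct holds exactly one connector, so the rule is a flat "or" over single-connector disjuncts:
def single_link_rule(x, n):
    """Build the single-link disjunct rule for cluster x of n (illustration only)."""
    cx = 'C' + str(x).zfill(2)
    cs = ['C' + str(i).zfill(2) for i in range(1, n + 1)]
    disjuncts = (['(' + c + cx + '-)' for c in cs]    # one left-connector disjunct per cluster
               + ['(' + cx + c + '+)' for c in cs])   # one right-connector disjunct per cluster
    return cx + ': ' + ' or '.join(disjuncts) + ';'

print(single_link_rule(1, 2))
# C01: (C01C01-) or (C02C01-) or (C01C01+) or (C01C02+);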
%%capture
kwargs['grammar_rules'] = -2
average24, long24 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
print(UTC())
kwargs['word_space'] = 'vectors'
kwargs['clustering'] = 'kmeans'
%%capture
kwargs['context'] = 1
kwargs['grammar_rules'] = 1
average24, long24 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
print(UTC())
%%capture
kwargs['grammar_rules'] = 2
average24, long24 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
print(UTC())
%%capture
kwargs['context'] = 2
average24, long24 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
print(UTC())
%%capture
kwargs['grammar_rules'] = 1
average24, long24 = table_cds(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
print(UTC())
display(html_table([header]+long_table))
from write_files import list2file
print(UTC(), ':: finished, elapsed', str(round((time.time() - start) / 3600.0, 1)), 'hours')
table_str = list2file(table, out_dir + '/short_table.txt')
if runs == (1, 1):
    print('Results saved to', out_dir + '/short_table.txt')
else:
    long_table_str = list2file(long_table, out_dir + '/long_table.txt')
    print('Average results saved to', out_dir + '/short_table.txt\n'
          'Detailed results for every run saved to', out_dir + '/long_table.txt')