2018-11-30
beta version (parts). Based on Child-Directed-Speech-2018-10-24.html. Non-working tests 2.1 and 2.2 (cDRK*) were deleted; they are to be added back in the final version.
This notebook is shared as the static page Child-Directed-Speech-2018-11-30.html;
its data are in the Child-Directed-Speech-2018-11-30 directory.
Previous (reference) tests:
Child-Directed-Speech-2018-10-19.html,
Child-Directed-Speech-2018-08-14.html,
Child-Directed-Speech-2018-08-06.html.
import os, sys, time
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
from src.grammar_learner.utl import UTC
from src.grammar_learner.read_files import check_dir
from src.grammar_learner.write_files import list2file
from src.grammar_learner.widgets import html_table
from src.grammar_learner.pqa_table import table_rows
# Prepare the temp directory, result accumulators, run timer, and output path.
tmpath = module_path + '/tmp/'
check_dir(tmpath, True, 'none')
table, long_table = [], []  # averaged result rows / per-run result rows
start = time.time()
print(UTC(), ':: module_path =', module_path)
# Number of repeated runs; any multi-run configuration gets a '-multi' suffix
# on its output directory (see below).
runs = (1, 1)
out_dir = module_path + '/output/Child-Directed-Speech-' + str(UTC())[:10] + '_'
if runs != (1, 1):
    out_dir += '-multi'
# Grammar-learner configuration shared (and partially overridden) by every
# test cell below.
kwargs = dict(
    left_wall='',
    period=False,
    word_space='vectors',
    clustering=['kmeans', 'kmeans++', 10],
    cluster_range=[30, 120, 3, 3],
    cluster_criteria='silhouette',
    clustering_metric=['silhouette', 'cosine'],
    cluster_level=1,
    rules_merge=0.8,         # grammar rules merge threshold
    rules_aggregation=0.2,   # grammar rules aggregation threshold
    top_level=0.01,          # top-level rules generalization threshold
    tmpath=tmpath,
    verbose='min',
    template_path='poc-turtle',
    linkage_limit=1000,
)
# Test rows consumed by table_rows; presumably [id, corpus, parse source,
# ?, ?, linkage mode] — confirm against pqa_table.table_rows.
lines = [
    [33, 'CDS-caps-br-text+brent9mos', 'LG-English', 0, 0, 'none'],
    [34, 'CDS-caps-br-text+brent9mos', 'LG-English', 0, 0, 'rules'],
    [35, 'CDS-caps-br-text+brent9mos', 'R=6-Weight=6:R-mst-weight=+1:R', 0, 0, 'none'],
    [36, 'CDS-caps-br-text+brent9mos', 'R=6-Weight=6:R-mst-weight=+1:R', 0, 0, 'rules'],
]
# Test corpus path and reference path both point at the clean corpus
# (clean: both files, 100% parsed).
cp = rp = module_path + '/data/CDS/LG-E-clean'
%%capture
# Presumably test 2.3 (cf. the average23/long23 names): context window 2,
# vector word space, k-means clustering, grammar rules level 2.
# NOTE(review): %%capture suppresses ALL of this cell's output, including the
# display() call — in the original notebook the display likely sat in a
# separate cell; confirm against the .html export.
kwargs['context'] = 2
kwargs['word_space'] = 'vectors'
kwargs['clustering'] = 'kmeans'
kwargs['grammar_rules'] = 2
average23, long23, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average23)
long_table.extend(long23)
display(html_table([header]+average23))
%%capture
# Presumably test 2.4: same context/rules as the previous cell, but with a
# discrete word space and 'group' clustering.  Mutates the shared kwargs in
# place, so settings persist into later cells unless overridden.
kwargs['context'] = 2
kwargs['word_space'] = 'discrete'
kwargs['clustering'] = 'group'
kwargs['grammar_rules'] = 2
average24, long24, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average24)
long_table.extend(long24)
display(html_table([header]+average24))
%%capture
# Presumably test 3.1: agglomerative (Ward) clustering with minimal
# word/link/co-occurrence count filtering.
# NOTE(review): cluster_range is overridden here from the earlier list
# [30,120,3,3] to the scalar 400 — verify table_rows accepts both forms.
kwargs['clustering'] = ['agglomerative', 'ward']
kwargs['clustering_metric'] = ['silhouette', 'cosine']
kwargs['min_word_count'] = 1
kwargs['min_link_count'] = 1
kwargs['min_co-occurrence_count'] = 1
kwargs['cluster_range'] = 400
average31, long31, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average31)
long_table.extend(long31)
display(html_table([header] + average31))
%%capture
# Presumably test 3.2: agglomerative clustering with complete linkage and
# cosine affinity; all other settings carry over from the previous cell.
kwargs['clustering'] = ['agglomerative', 'complete', 'cosine']
average32, long32, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average32)
long_table.extend(long32)
display(html_table([header] + average32))
%%capture
# Presumably test 3.3.
# NOTE(review): the clustering setting here is byte-identical to the previous
# cell (3.2), so this run duplicates its configuration — possibly a
# copy-paste slip (a different linkage/affinity may have been intended);
# confirm against the reference notebook before the final version.
kwargs['clustering'] = ['agglomerative', 'complete', 'cosine']
average33, long33, header = table_rows(lines, out_dir, cp, rp, runs, **kwargs)
table.extend(average33)
long_table.extend(long33)
display(html_table([header] + average33))
# Show the combined per-run table, report elapsed time, and persist results.
# (Indentation of the if/else restored — it was lost in the notebook export.)
display(html_table([header] + long_table))
elapsed_hours = round((time.time() - start) / 3600.0, 1)
print(UTC(), ':: finished, elapsed', str(elapsed_hours), 'hours')
short_path = out_dir + '/short_table.txt'
table_str = list2file(table, short_path)
if runs == (1, 1):
    # Single-run mode: the averaged table is the only result worth saving.
    print('Results saved to', short_path)
else:
    # Multi-run mode: also save the detailed per-run table.
    long_path = out_dir + '/long_table.txt'
    long_table_str = list2file(long_table, long_path)
    print('Average results saved to', short_path + '\n'
          'Detailed results for every run saved to', long_path)