2018-10-29: clean training and test sets
Agglomerative clustering; test_grammar updated 2018-10-19; Link Grammar 5.4.4.
This notebook is shared as static Child-Directed-Speech-2018-10-29.html.
The data is shared via the Child-Directed-Speech-2018-10-29 directory.
Previous (reference) tests:
Child-Directed-Speech-2018-10-19.html,
Child-Directed-Speech-2018-08-14.html,
Child-Directed-Speech-2018-08-06.html.
import os, sys, time
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path: sys.path.append(module_path)
from src.grammar_learner.utl import UTC
from src.grammar_learner.read_files import check_dir
from src.grammar_learner.write_files import list2file
from src.grammar_learner.widgets import html_table
from src.grammar_learner.pqa_table import table_rows
tmpath = module_path + '/tmp/'   # directory for temporary files
check_dir(tmpath, True, 'none')  # create the temporary directory if needed
table = []                       # result accumulators
long_table = []
start = time.time()              # wall-clock start time
print(UTC(), ':: module_path =', module_path)
# corpus = 'CDS-caps-br-text+brent9mos'
corpus = 'CDS-caps-br-text'
# dataset = 'LG-English'
dataset = 'LG-English-clean-clean'  # 2018-10-29: only 100% parsed sentences
out_dir = module_path + '/output/Child-Directed-Speech-' + str(UTC())[:10]
runs = (1,1)
kwargs = {
'left_wall' : '' ,
'period' : False ,
'context' : 2 ,
'word_space' : 'sparse' ,
'min_word_count': 5 ,
'min_link_count': 2 ,
'min_co-occurrence_count': 1 ,
'min_co-occurrence_probability' : 0.0 ,
'clustering' : ('agglomerative', 'ward'),
'cluster_range' : 10,
'cluster_criteria' : 'silhouette',
'clustering_metric' : ('silhouette', 'cosine'),
'cluster_level' : 1 ,
'grammar_rules' : 2 ,
'max_disjuncts' : 100 ,
'tmpath' : tmpath ,
'verbose' : 'min' ,
'template_path' : 'poc-turtle',
'linkage_limit' : 1000 ,
'categories_generalization': 'off' }
lines = [
[33, corpus , dataset ,0,0, 'none' ],
[34, corpus , dataset ,0,0, 'rules' ],
[35, corpus , 'R=6-Weight=6:R-mst-weight=+1:R' ,0,0, 'none' ],
[36, corpus , 'R=6-Weight=6:R-mst-weight=+1:R' ,0,0, 'rules' ]]
line = [[33, corpus, dataset, 0,0, 'none' ]]
rp = module_path + '/data/CDS-caps-br-text/LG-English-clean-clean'
cp = rp # corpus path = reference_path :: use 'gold' parses as test corpus
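The 'clustering': ('agglomerative', 'ward') and 'clustering_metric': ('silhouette', 'cosine') settings above cluster the word space with Ward-linkage agglomerative clustering and score candidate cluster counts by cosine silhouette. The standalone sketch below only illustrates that general idea with scikit-learn on random stand-in data; it is not the Grammar Learner's internal code, and X is a hypothetical word-vector matrix.
# Illustration only: Ward agglomerative clustering scored by cosine silhouette.
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score
X = np.random.rand(200, 20)                 # hypothetical (words x features) matrix
scores = {}
for k in range(5, 45, 5):                   # same sweep as the cells below
    labels = AgglomerativeClustering(n_clusters=k, linkage='ward').fit_predict(X)
    scores[k] = silhouette_score(X, labels, metric='cosine')
print('best cluster_range by silhouette:', max(scores, key=scores.get))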
Only 100% parsed sentences
%%capture
dataset = 'LG-English-clean-clean' # 2018-10-29: only 100% parsed
rp = module_path + '/data/CDS-caps-br-text/LG-English-clean-clean'
cp = rp # corpus path = reference_path :: use 'gold' parses as test corpus
average21 = []
table = []
crange = kwargs['cluster_range']  # save the original setting
for kwargs['cluster_range'] in range(5, 45, 5):  # sweep 5, 10, ..., 40 clusters
    kwargs['max_disjuncts'] = 500
    # kwargs['max_disjuncts'] = 10 * kwargs['cluster_range']
    average, _, header = table_rows(line, out_dir, cp, rp, runs, **kwargs)
    average21.extend(average)
    table.extend(average)
kwargs['cluster_range'] = crange  # restore the original setting
display(html_table([header] + average21))
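The sweep results above are only displayed; to keep a copy on disk, the list2file helper imported in the setup cell could be used. The snippet below is a sketch that assumes list2file takes a list of rows and an output file path; the file name is hypothetical.
# Sketch (assumed list2file signature: rows, output file path; file name is hypothetical):
check_dir(out_dir, True, 'none')
list2file([header] + average21, out_dir + '/cluster_range_sweep_21.txt')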
%%capture
corpus = 'CDS-caps-br-text+brent9mos'
dataset = 'LG-English'
line = [[33, corpus, dataset, 0,0, 'none' ]]  # rebuild the test line so the new corpus/dataset take effect
rp = module_path + '/data/CDS-caps-br-text/LG-English-clean-clean'
cp = rp # corpus path = reference_path :: use 'gold' parses as test corpus
average22 = []
table = []
crange = kwargs['cluster_range']  # save the original setting
for kwargs['cluster_range'] in range(5, 45, 5):  # sweep 5, 10, ..., 40 clusters
    kwargs['max_disjuncts'] = 500
    # kwargs['max_disjuncts'] = 10 * kwargs['cluster_range']
    average, _, header = table_rows(line, out_dir, cp, rp, runs, **kwargs)
    average22.extend(average)
    table.extend(average)
kwargs['cluster_range'] = crange  # restore the original setting
display(html_table([header] + average22))
%%capture
dataset = 'LG-English-clean' # 2018-10-26: moderate cleanup
line = [[33, corpus, dataset, 0,0, 'none' ]]  # rebuild the test line so the new dataset takes effect
rp = module_path + '/data/CDS-caps-br-text/LG-English-clean'
cp = rp # corpus path = reference_path :: use 'gold' parses as test corpus
average23 = []
table = []
crange = kwargs['cluster_range']  # save the original setting
for kwargs['cluster_range'] in range(5, 45, 5):  # sweep 5, 10, ..., 40 clusters
    kwargs['max_disjuncts'] = 500
    # kwargs['max_disjuncts'] = 10 * kwargs['cluster_range']
    average, _, header = table_rows(line, out_dir, cp, rp, runs, **kwargs)
    average23.extend(average)
    table.extend(average)
kwargs['cluster_range'] = crange  # restore the original setting
display(html_table([header] + average23))
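A closing cell can report the total run time from the start timestamp captured in the setup cell, for example:
# Report total elapsed time using the `start` timestamp from the setup cell.
print(UTC(), ':: tests finished, elapsed', round(time.time() - start), 'seconds')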