Commit c971b69b authored by matjaz

Merge branch 'dev_package_independance' into dev

parents dd17e8f2 15b30956
@@ -4,6 +4,7 @@ import os
PROJECT_DIR = os.path.dirname(__file__)
PUBLIC_DIR = os.path.join(PROJECT_DIR, 'public')
BACKUP_DIR = os.path.join(PROJECT_DIR, 'backup')
DEBUG = False
TEMPLATE_DEBUG = True
......
import os
# === STANDARD PACKAGE SETTINGS ===
PACKAGE_ROOT = os.path.dirname(__file__)
# === AUTO IMPORT OPTIONS ===
#If AUTO_IMPORT_DB is True, the data file(s) listed below are imported automatically whenever the ClowdFlows project is newly deployed or refreshed from git
AUTO_IMPORT_DB = True
#For a description of AUTO_IMPORT_DB_REPLACE_OPTION see the 'replace' option of the workflows/import_package command
AUTO_IMPORT_DB_REPLACE_OPTION = True
#If file(s) other than ./db/package_data.json should be imported, adjust AUTO_IMPORT_DB_FILES accordingly
AUTO_IMPORT_DB_FILES = [os.path.join(PACKAGE_ROOT, 'db/package_data.json')]
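#An editorial sketch (not part of the commit) of how a package shipping several
#fixtures might extend this setting; the second file name is hypothetical:
#AUTO_IMPORT_DB_FILES = [
#    os.path.join(PACKAGE_ROOT, 'db/package_data.json'),
#    os.path.join(PACKAGE_ROOT, 'db/extra_fixtures.json'),
#]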
import sys
from django.shortcuts import render
from workflows import module_importer
def setattr_local(name, value, package):
    setattr(sys.modules[__name__], name, value)
module_importer.import_all_packages_libs("interaction_views", setattr_local)
def test_interaction(request, input_dict, output_dict, widget):
    return render(request, 'interactions/test_interaction.html', {'widget': widget})
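# An editorial sketch (not from this commit) of a package-local
# interaction_views.py that import_all_packages_libs would pick up; the view
# name and template path are hypothetical:
def my_package_interaction(request, input_dict, output_dict, widget):
    return render(request, 'interactions/my_package_interaction.html', {'widget': widget})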
......
This source diff could not be displayed because it is too large.
import logging
import os
import sys
from settings import *
from settings import PACKAGE_ROOT
#------------------------------------------------------------------------------
# prepare environment for loading Latino (the Python.NET interpreter should be
# used); see: http://pythonnet.sourceforge.net/
#------------------------------------------------------------------------------
sys.path.append(os.path.join(PACKAGE_ROOT, 'bin'))
try:
    from LatinoCloudFlows import *
    import System
    import Latino
    from LatinoClowdFlows import *
except Exception:
    logging.warning("DotNet assemblies could not be loaded! Probable reasons: missing dlls or wrong interpreter "
                    "(see http://pythonnet.sourceforge.net). "
                    "Other functionality of ClowdFlows (besides .Net assemblies) should be OK!")
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
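A minimal sketch (an assumption, not part of this commit) of how downstream code can check that the Latino bridge actually loaded before calling the generated wrappers below; the flag name is hypothetical:
try:
    import Latino
    LATINO_AVAILABLE = True
except ImportError:
    LATINO_AVAILABLE = False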
# -----------------------------------------------------------------------------------------------------
# WARNING: THIS IS AN AUTOMATICALLY GENERATED FILE, DO NOT EDIT IT MANUALLY AS YOU MAY LOSE YOUR CHANGES!
# -----------------------------------------------------------------------------------------------------
from serialization_utils import *
from import_dotnet import *
def latino_flatten_object_to_string_array(inputDict):
    _data = ToNetObj(inputDict['data'])
    execResult = LatinoCF.FlattenObjectToStringArray(_data)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['flatData'] = execResultPy
    return outputDict
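# --- editorial note (not generated) -------------------------------------------
# Every wrapper in this file follows the same convention: it receives a single
# inputDict keyed by the widget's input names, marshals the values to .NET with
# the To* helpers, calls the matching LatinoCF method, converts the result back
# with ToPyObj, and returns an outputDict keyed by output name. A minimal
# calling sketch for the wrapper above (assumes the Latino dlls loaded):
def _example_flatten():
    out = latino_flatten_object_to_string_array({'data': [['a', 'b'], 'c']})
    return out['flatData']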
def latino_load_adc(inputDict):
    _file = ToString(inputDict['file'])
    _leadingLabels = ToBool(inputDict['leadingLabels'])
    execResult = LatinoCF.LoadADC(_file, _leadingLabels)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_load_adcfrom_string(inputDict):
    _plainString = ToString(inputDict['plainString'])
    _leadingLabels = ToBool(inputDict['leadingLabels'])
    execResult = LatinoCF.LoadADCFromString(_plainString, _leadingLabels)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_save_adcto_xml(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    execResult = LatinoCF.SaveADCtoXml(_adc)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['string'] = execResultPy
    return outputDict
def latino_load_adcfrom_xml(inputDict):
    _xml = ToString(inputDict['xml'])
    execResult = LatinoCF.LoadADCfromXml(_xml)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_get_doc_strings(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _elementAnnotation = ToString(inputDict['elementAnnotation'])
    _elementFeatureConditions = ToString(inputDict['elementFeatureConditions'])
    _delimiter = ToString(inputDict['delimiter'])
    _includeDocId = ToBool(inputDict['includeDocId'])
    execResult = LatinoCF.GetDocStrings(_adc, _elementAnnotation, _elementFeatureConditions, _delimiter, _includeDocId)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['strings'] = execResultPy
    return outputDict
def latino_extract_documents_features(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _featureName = ToString(inputDict['featureName'])
    execResult = LatinoCF.ExtractDocumentsFeatures(_adc, _featureName)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['strings'] = execResultPy
    return outputDict
def latino_add_documents_features(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _featureValues = ToNetObj(inputDict['featureValues'])
    _featureName = ToString(inputDict['featureName'])
    _featureValuePrefix = ToString(inputDict['featureValuePrefix'])
    execResult = LatinoCF.AddDocumentsFeatures(_adc, _featureValues, _featureName, _featureValuePrefix)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_split_documents_by_feature_value(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _featureCondition = ToString(inputDict['featureCondition'])
    execResult = LatinoCF.SplitDocumentsByFeatureValue(_adc, _featureCondition)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adcFiltered'] = execResultPy['adcFiltered']
    outputDict['adcRest'] = execResultPy['adcRest']
    return outputDict
def latino_mark_documents_with_set_feature(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _featureName = ToString(inputDict['featureName'])
    _featureValuePrefix = ToString(inputDict['featureValuePrefix'])
    _numOfSets = ToInt(inputDict['numOfSets'])
    _random = ToBool(inputDict['random'])
    _useSeed = ToBool(inputDict['useSeed'])
    _randomSeed = ToInt(inputDict['randomSeed'])
    execResult = LatinoCF.MarkDocumentsWithSetFeature(_adc, _featureName, _featureValuePrefix, _numOfSets, _random, _useSeed, _randomSeed)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_corpus_statistics(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    execResult = LatinoCF.CorpusStatistics(_adc)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['docCount'] = execResultPy['docCount']
    outputDict['featureCount'] = execResultPy['featureCount']
    outputDict['description'] = execResultPy['description']
    return outputDict
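# A usage sketch (not generated; assumes the Latino dlls loaded): load a
# labeled corpus and report its statistics. The file name is hypothetical.
def _example_corpus_overview():
    adc = latino_load_adc({'file': 'corpus.tab', 'leadingLabels': True})['adc']
    stats = latino_corpus_statistics({'adc': adc})
    return stats['docCount'], stats['featureCount'], stats['description']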
def latino_construct_english_maximum_entropy_sentence_detector(inputDict):
    execResult = LatinoCF.ConstructEnglishMaximumEntropySentenceDetector()
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tokenizer'] = execResultPy
    return outputDict
def latino_construct_english_maximum_entropy_tokenizer(inputDict):
    _alphaNumericOptimization = ToBool(inputDict['alphaNumericOptimization'])
    execResult = LatinoCF.ConstructEnglishMaximumEntropyTokenizer(_alphaNumericOptimization)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tokenizer'] = execResultPy
    return outputDict
def latino_construct_unicode_tokenizer(inputDict):
    _filter = ToEnum(Latino.TextMining.TokenizerFilter, inputDict['filter'], Latino.TextMining.TokenizerFilter.None)
    _minTokenLen = ToInt(inputDict['minTokenLen'])
    execResult = LatinoCF.ConstructUnicodeTokenizer(_filter, _minTokenLen)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tokenizer'] = execResultPy
    return outputDict
def latino_construct_simple_tokenizer(inputDict):
    _type = ToEnum(Latino.TextMining.TokenizerType, inputDict['type'], Latino.TextMining.TokenizerType.AllChars)
    _minTokenLen = ToInt(inputDict['minTokenLen'])
    execResult = LatinoCF.ConstructSimpleTokenizer(_type, _minTokenLen)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tokenizer'] = execResultPy
    return outputDict
def latino_construct_regex_tokenizer(inputDict):
    _tokenRegex = ToString(inputDict['tokenRegex'])
    _ignoreUnknownTokens = ToBool(inputDict['ignoreUnknownTokens'])
    _regexOptionsIgnoreCase = ToBool(inputDict['regexOptionsIgnoreCase'])
    _regexOptionsMultiline = ToBool(inputDict['regexOptionsMultiline'])
    _regexOptionsExplicitCapture = ToBool(inputDict['regexOptionsExplicitCapture'])
    _regexOptionsCompiled = ToBool(inputDict['regexOptionsCompiled'])
    _regexOptionsSingleline = ToBool(inputDict['regexOptionsSingleline'])
    _regexOptionsIgnorePatternWhitespace = ToBool(inputDict['regexOptionsIgnorePatternWhitespace'])
    _regexOptionsRightToLeft = ToBool(inputDict['regexOptionsRightToLeft'])
    _regexOptionsECMAScript = ToBool(inputDict['regexOptionsECMAScript'])
    _regexOptionsCultureInvariant = ToBool(inputDict['regexOptionsCultureInvariant'])
    execResult = LatinoCF.ConstructRegexTokenizer(_tokenRegex, _ignoreUnknownTokens, _regexOptionsIgnoreCase, _regexOptionsMultiline, _regexOptionsExplicitCapture, _regexOptionsCompiled, _regexOptionsSingleline, _regexOptionsIgnorePatternWhitespace, _regexOptionsRightToLeft, _regexOptionsECMAScript, _regexOptionsCultureInvariant)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tokenizer'] = execResultPy
    return outputDict
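# A usage sketch (not generated): build a word tokenizer from a regular
# expression. Each .NET RegexOptions flag is passed as a separate boolean;
# the pattern and flag choices below are illustrative assumptions.
def _example_regex_tokenizer():
    return latino_construct_regex_tokenizer({
        'tokenRegex': r"[A-Za-z]+",
        'ignoreUnknownTokens': True,
        'regexOptionsIgnoreCase': True,
        'regexOptionsMultiline': False,
        'regexOptionsExplicitCapture': False,
        'regexOptionsCompiled': True,
        'regexOptionsSingleline': False,
        'regexOptionsIgnorePatternWhitespace': False,
        'regexOptionsRightToLeft': False,
        'regexOptionsECMAScript': False,
        'regexOptionsCultureInvariant': False,
    })['tokenizer']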
def latino_tokenize_sentences(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    _inputAnnotation = ToString(inputDict['inputAnnotation'])
    _outputAnnotation = ToString(inputDict['outputAnnotation'])
    execResult = LatinoCF.TokenizeSentences(_adc, _tokenizer, _inputAnnotation, _outputAnnotation)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_tokenize_words(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    _inputAnnotation = ToString(inputDict['inputAnnotation'])
    _outputAnnotation = ToString(inputDict['outputAnnotation'])
    execResult = LatinoCF.TokenizeWords(_adc, _tokenizer, _inputAnnotation, _outputAnnotation)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_tokenize_multiple(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    _inputAnnotation = ToString(inputDict['inputAnnotation'])
    _outputAnnotation = ToString(inputDict['outputAnnotation'])
    execResult = LatinoCF.TokenizeMultiple(_adc, _tokenizer, _inputAnnotation, _outputAnnotation)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_tokenize_string_string(inputDict):
    _text = ToNetObj(inputDict['text'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    execResult = LatinoCF.TokenizeStringString(_text, _tokenizer)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['string'] = execResultPy
    return outputDict
def latino_tokenize_string_words(inputDict):
    _text = ToNetObj(inputDict['text'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    execResult = LatinoCF.TokenizeStringWords(_text, _tokenizer)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['string'] = execResultPy
    return outputDict
def latino_construct_english_maximum_entropy_pos_tagger(inputDict):
    _beamSize = ToInt(inputDict['beamSize'])
    execResult = LatinoCF.ConstructEnglishMaximumEntropyPosTagger(_beamSize)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['posTagger'] = execResultPy
    return outputDict
def latino_pos_tag(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _posTagger = ToNetObj(inputDict['posTagger'])
    _groupAnnotation = ToString(inputDict['groupAnnotation'])
    _elementAnnotation = ToString(inputDict['elementAnnotation'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.PosTag(_adc, _posTagger, _groupAnnotation, _elementAnnotation, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_pos_tag_string(inputDict):
    _text = ToNetObj(inputDict['text'])
    _posTagger = ToNetObj(inputDict['posTagger'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.PosTagString(_text, _posTagger, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['string'] = execResultPy
    return outputDict
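# A usage sketch (not generated): POS-tag a plain string. The beam size and
# output feature name are illustrative assumptions.
def _example_pos_tag_string(text):
    tagger = latino_construct_english_maximum_entropy_pos_tagger({'beamSize': 3})['posTagger']
    return latino_pos_tag_string({'text': text, 'posTagger': tagger, 'outputFeature': 'posTag'})['string']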
def latino_get_stop_words(inputDict):
    _language = ToEnum(Latino.TextMining.Language, inputDict['language'], Latino.TextMining.Language.English)
    execResult = LatinoCF.GetStopWords(_language)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['strings'] = execResultPy
    return outputDict
def latino_construct_lemma_sharp_lemmatizer(inputDict):
    _language = ToEnum(Latino.TextMining.Language, inputDict['language'], Latino.TextMining.Language.English)
    execResult = LatinoCF.ConstructLemmaSharpLemmatizer(_language)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tagger'] = execResultPy
    return outputDict
def latino_construct_snowball_stemmer(inputDict):
    _language = ToEnum(Latino.TextMining.Language, inputDict['language'], Latino.TextMining.Language.English)
    execResult = LatinoCF.ConstructSnowballStemmer(_language)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tagger'] = execResultPy
    return outputDict
def latino_construct_stop_words_tagger(inputDict):
    _stopWords = ToString(inputDict['stopWords'])
    _ignoreCase = ToBool(inputDict['ignoreCase'])
    execResult = LatinoCF.ConstructStopWordsTagger(_stopWords, _ignoreCase)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tagger'] = execResultPy
    return outputDict
def latino_construct_condition_tagger(inputDict):
    _featureCondition = ToString(inputDict['featureCondition'])
    _outputFeatureValue = ToString(inputDict['outputFeatureValue'])
    _elementsTextToFeatureValue = ToBool(inputDict['elementsTextToFeatureValue'])
    execResult = LatinoCF.ConstructConditionTagger(_featureCondition, _outputFeatureValue, _elementsTextToFeatureValue)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tagger'] = execResultPy
    return outputDict
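# A usage sketch (not generated): build a stop-word tagger from the bundled
# English stop-word list. ConstructStopWordsTagger marshals its input with
# ToString, so the list is joined into one string here; the separator and the
# 'ignoreCase' choice are assumptions.
def _example_stopword_tagger():
    stop_words = latino_get_stop_words({'language': 'English'})['strings']
    return latino_construct_stop_words_tagger({'stopWords': '\n'.join(stop_words), 'ignoreCase': True})['tagger']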
def latino_tag_adcstem_lemma(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tagger = ToNetObj(inputDict['tagger'])
    _elementAnnotation = ToString(inputDict['elementAnnotation'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.TagADCStemLemma(_adc, _tagger, _elementAnnotation, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_tag_adcstopwords(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tagger = ToNetObj(inputDict['tagger'])
    _elementAnnotation = ToString(inputDict['elementAnnotation'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.TagADCStopwords(_adc, _tagger, _elementAnnotation, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_tag_adcmultiple(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tagger = ToNetObj(inputDict['tagger'])
    _elementAnnotation = ToString(inputDict['elementAnnotation'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.TagADCMultiple(_adc, _tagger, _elementAnnotation, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['adc'] = execResultPy
    return outputDict
def latino_tag_string_stem_lemma(inputDict):
    _text = ToNetObj(inputDict['text'])
    _tagger = ToNetObj(inputDict['tagger'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.TagStringStemLemma(_text, _tagger, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['string'] = execResultPy
    return outputDict
def latino_tag_string_stopwords(inputDict):
    _text = ToNetObj(inputDict['text'])
    _tagger = ToNetObj(inputDict['tagger'])
    _outputFeature = ToString(inputDict['outputFeature'])
    execResult = LatinoCF.TagStringStopwords(_text, _tagger, _outputFeature)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['string'] = execResultPy
    return outputDict
def latino_construct_bow_space(inputDict):
    _documents = ToNetObj(inputDict['documents'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    _stemmer = ToNetObj(inputDict['stemmer'])
    _stopwords = ToNetObj(inputDict['stopwords'])
    _maxNGramLen = ToInt(inputDict['maxNGramLen'])
    _minWordFreq = ToInt(inputDict['minWordFreq'])
    _wordWeightType = ToEnum(Latino.TextMining.WordWeightType, inputDict['wordWeightType'], Latino.TextMining.WordWeightType.TfIdf)
    _cutLowWeightsPerc = ToFloat(inputDict['cutLowWeightsPerc'])
    _normalizeVectors = ToBool(inputDict['normalizeVectors'])
    execResult = LatinoCF.ConstructBowSpace(_documents, _tokenizer, _stemmer, _stopwords, _maxNGramLen, _minWordFreq, _wordWeightType, _cutLowWeightsPerc, _normalizeVectors)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['bow'] = execResultPy['bow']
    outputDict['ds'] = execResultPy['ds']
    return outputDict
def latino_construct_bow_space_from_adc(inputDict):  # ADC-based overload of ConstructBowSpace; distinct name so it does not shadow the variant above
    _adc = ToNetObj(inputDict['adc'])
    _tokenId = ToString(inputDict['tokenId'])
    _stemId = ToString(inputDict['stemId'])
    _stopwordId = ToString(inputDict['stopwordId'])
    _labelId = ToString(inputDict['labelId'])
    _maxNGramLen = ToInt(inputDict['maxNGramLen'])
    _minWordFreq = ToInt(inputDict['minWordFreq'])
    _wordWeightType = ToEnum(Latino.TextMining.WordWeightType, inputDict['wordWeightType'], Latino.TextMining.WordWeightType.TfIdf)
    _cutLowWeightsPerc = ToFloat(inputDict['cutLowWeightsPerc'])
    _normalizeVectors = ToBool(inputDict['normalizeVectors'])
    execResult = LatinoCF.ConstructBowSpace(_adc, _tokenId, _stemId, _stopwordId, _labelId, _maxNGramLen, _minWordFreq, _wordWeightType, _cutLowWeightsPerc, _normalizeVectors)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['bow'] = execResultPy['bow']
    outputDict['ds'] = execResultPy['ds']
    return outputDict
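# A usage sketch (not generated): build a BOW model and dataset from an
# annotated document corpus (ADC). The annotation ids and parameter values
# are illustrative assumptions.
def _example_bow_from_adc(adc):
    res = latino_construct_bow_space_from_adc({
        'adc': adc,
        'tokenId': 'Token',
        'stemId': 'Stem',
        'stopwordId': 'StopWord',
        'labelId': '',
        'maxNGramLen': 2,
        'minWordFreq': 5,
        'wordWeightType': 'TfIdf',
        'cutLowWeightsPerc': 0.2,
        'normalizeVectors': True,
    })
    return res['bow'], res['ds']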
def latino_get_vocabulary(inputDict):
    _bow = ToNetObj(inputDict['bow'])
    _startIndex = ToInt(inputDict['startIndex'])
    _maxWords = ToInt(inputDict['maxWords'])
    execResult = LatinoCF.GetVocabulary(_bow, _startIndex, _maxWords)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['vocabulary'] = execResultPy
    return outputDict
def latino_process_new_documents_from_adc(inputDict):
    _bow = ToNetObj(inputDict['bow'])
    _adc = ToNetObj(inputDict['adc'])
    execResult = LatinoCF.ProcessNewDocumentsFromADC(_bow, _adc)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['ds'] = execResultPy
    return outputDict
def latino_process_new_documents_from_string(inputDict):
    _bow = ToNetObj(inputDict['bow'])
    _lst = ToNetObj(inputDict['lst'])
    execResult = LatinoCF.ProcessNewDocumentsFromString(_bow, _lst)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['ds'] = execResultPy
    return outputDict
def latino_add_labels_to_document_vectors(inputDict):
    _ds = ToNetObj(inputDict['ds'])
    _labels = ToNetObj(inputDict['labels'])
    execResult = LatinoCF.AddLabelsToDocumentVectors(_ds, _labels)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['ds'] = execResultPy
    return outputDict
def latino_remove_document_vectors_labels(inputDict):
    _ds = ToNetObj(inputDict['ds'])
    execResult = LatinoCF.RemoveDocumentVectorsLabels(_ds)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['ds'] = execResultPy
    return outputDict
def latino_extract_dataset_labels(inputDict):
    _ds = ToNetObj(inputDict['ds'])
    execResult = LatinoCF.ExtractDatasetLabels(_ds)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['labels'] = execResultPy
    return outputDict
def latino_dataset_split_simple(inputDict):
    _ds = ToNetObj(inputDict['ds'])
    _percentage = ToFloat(inputDict['percentage'])
    _randomSeed = ToInt(inputDict['randomSeed'])
    execResult = LatinoCF.DatasetSplitSimple(_ds, _percentage, _randomSeed)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['ds1'] = execResultPy['ds1']
    outputDict['ds2'] = execResultPy['ds2']
    return outputDict
def latino_dataset_split_predefined(inputDict):
    _ds = ToNetObj(inputDict['ds'])
    _sets = ToNetObj(inputDict['sets'])
    _setId = ToInt(inputDict['setId'])
    execResult = LatinoCF.DatasetSplitPredefined(_ds, _sets, _setId)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['ds1'] = execResultPy['ds1']
    outputDict['ds2'] = execResultPy['ds2']
    return outputDict
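# A usage sketch (not generated): random 70/30 train/test split. Whether
# 'percentage' expects a fraction or a 0-100 value follows the widget
# definition; a fraction is assumed here.
def _example_train_test_split(ds):
    res = latino_dataset_split_simple({'ds': ds, 'percentage': 0.7, 'randomSeed': 1})
    return res['ds1'], res['ds2']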
def latino_calculate_similarity_matrix(inputDict):
    _ds1 = ToNetObj(inputDict['ds1'])
    _ds2 = ToNetObj(inputDict['ds2'])
    _thresh = ToFloat(inputDict['thresh'])
    _fullMatrix = ToBool(inputDict['fullMatrix'])
    execResult = LatinoCF.CalculateSimilarityMatrix(_ds1, _ds2, _thresh, _fullMatrix)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['smx'] = execResultPy
    return outputDict
def latino_sparse_matrix_to_table(inputDict):
    _smx = ToNetObj(inputDict['smx'])
    execResult = LatinoCF.SparseMatrixToTable(_smx)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['tbl'] = execResultPy
    return outputDict
def latino_construct_kmeans_clusterer(inputDict):
    _k = ToInt(inputDict['k'])
    _centroidType = ToEnum(Latino.Model.CentroidType, inputDict['centroidType'], Latino.Model.CentroidType.NrmL2)
    _similarityModel = ToEnum(LatinoCloudFlows.SimilarityModel, inputDict['similarityModel'], LatinoCloudFlows.SimilarityModel.Cosine)
    _randomSeed = ToInt(inputDict['randomSeed'])
    _eps = ToFloat(inputDict['eps'])
    _trials = ToInt(inputDict['trials'])
    execResult = LatinoCF.ConstructKMeansClusterer(_k, _centroidType, _similarityModel, _randomSeed, _eps, _trials)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['clusterer'] = execResultPy
    return outputDict
def latino_construct_kmeans_fast_clusterer(inputDict):
    _k = ToInt(inputDict['k'])
    _randomSeed = ToInt(inputDict['randomSeed'])
    _eps = ToFloat(inputDict['eps'])
    _trials = ToInt(inputDict['trials'])
    execResult = LatinoCF.ConstructKMeansFastClusterer(_k, _randomSeed, _eps, _trials)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['clusterer'] = execResultPy
    return outputDict
def latino_construct_hierarchical_bisecting_clusterer(inputDict):
    _minQuality = ToFloat(inputDict['minQuality'])
    execResult = LatinoCF.ConstructHierarchicalBisectingClusterer(_minQuality)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['clusterer'] = execResultPy
    return outputDict
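# A usage sketch (not generated): construct a k-means clusterer over cosine
# similarity. Enum values are passed by name; k and the remaining parameters
# are illustrative assumptions.
def _example_kmeans_clusterer():
    return latino_construct_kmeans_clusterer({
        'k': 5,
        'centroidType': 'NrmL2',
        'similarityModel': 'Cosine',
        'randomSeed': 1,
        'eps': 0.0005,
        'trials': 1,
    })['clusterer']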