Commit c971b69b authored by matjaz

Merge branch 'dev_package_independance' into dev

parents dd17e8f2 15b30956
@@ -4,6 +4,7 @@ import os
PROJECT_DIR = os.path.dirname(__file__)
PUBLIC_DIR = os.path.join(PROJECT_DIR, 'public')
BACKUP_DIR = os.path.join(PROJECT_DIR, 'backup')
DEBUG = False
TEMPLATE_DEBUG = True
......
import os
# === STANDARD PACKAGE SETTINGS ===
PACKAGE_ROOT = os.path.dirname(__file__)
# === AUTO IMPORT OPTIONS ===
# If AUTO_IMPORT_DB is True, the data file(s) below are imported automatically when the ClowdFlows project is newly deployed or refreshed from git
AUTO_IMPORT_DB = True
# For a description of AUTO_IMPORT_DB_REPLACE_OPTION, see the 'replace' option of the workflows/import_package command
AUTO_IMPORT_DB_REPLACE_OPTION = True
# If file(s) other than ./db/package_data.json should be imported, adjust AUTO_IMPORT_DB_FILES accordingly
AUTO_IMPORT_DB_FILES = [os.path.join(PACKAGE_ROOT, 'db/package_data.json')]
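These options are meant to be read at deploy/refresh time. A minimal sketch of how a deployment hook could consume them, assuming Django's call_command and the workflows/import_package command named in the comments above (how that command actually receives its arguments is not shown in this diff):

# Illustrative only; the 'import_package' command name and its 'replace'
# option are taken from the comments above, everything else is an assumption.
import os
from django.core.management import call_command

def auto_import_package_data(settings_module):
    if not settings_module.AUTO_IMPORT_DB:
        return
    for path in settings_module.AUTO_IMPORT_DB_FILES:
        if os.path.exists(path):
            call_command('import_package', path,
                         replace=settings_module.AUTO_IMPORT_DB_REPLACE_OPTION)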
import sys
from django.shortcuts import render
from workflows import packageLibImporter
from workflows import module_importer
def setattr_local(name, value, package):
    setattr(sys.modules[__name__], name, value)

packageLibImporter.importAllPackagesLib("interaction_views", setattr_local)
module_importer.import_all_packages_libs("interaction_views", setattr_local)

def test_interaction(request, input_dict, output_dict, widget):
    return render(request, 'interactions/test_interaction.html', {'widget': widget})
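For context, a rough sketch of what an importer such as module_importer.import_all_packages_libs could do: walk a list of package names, import <package>.<module_name>, and hand every public callable to the supplied setter. The real implementation in workflows/module_importer.py may differ.

# Hypothetical sketch, not the actual workflows/module_importer code.
import importlib

def import_all_packages_libs(module_name, setter, packages=()):
    for package in packages:  # in ClowdFlows the package list would come from settings
        try:
            mod = importlib.import_module('%s.%s' % (package, module_name))
        except ImportError:
            continue  # this package does not provide such a module
        for name in dir(mod):
            value = getattr(mod, name)
            if callable(value) and not name.startswith('_'):
                setter(name, value, package)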
......
import logging
import os
import sys
from settings import *
from settings import PACKAGE_ROOT
#------------------------------------------------------------------------------
# prepare the environment for loading Latino (the Python.NET interpreter should be used)
# see: http://pythonnet.sourceforge.net/
#------------------------------------------------------------------------------
sys.path.append(package_bin)
sys.path.append(os.path.join(PACKAGE_ROOT, 'bin'))
try:
    from LatinoCloudFlows import *
    import System
    import Latino
    from LatinoClowdFlows import *
except Exception:
    logging.warning("LatinoClowdFlows could not be imported! Either no Latino dlls are available or a "
                    "wrong interpreter is used. See 'http://pythonnet.sourceforge.net' for interpreter details. "
                    "Other functionality (besides Latino) will work as expected.")
    logging.warning("DotNet assemblies could not be loaded! Probable reasons: missing dlls or wrong interpreter (see http://pythonnet.sourceforge.net). "
                    "Other functionality of ClowdFlows (besides .Net assemblies) should be OK!")
    pass
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
\ No newline at end of file
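Because the import is wrapped in a try/except, the rest of ClowdFlows keeps working without the .NET bridge. A hedged sketch of how callers could guard Latino-specific code; the helper names below are made up for illustration and are not part of this commit:

# 'Latino' is only bound when the try block above succeeded under Python.NET.
def latino_available():
    return 'Latino' in globals()

def require_latino():
    if not latino_available():
        raise RuntimeError("Latino .NET assemblies are not loaded; run under Python.NET "
                           "with the package 'bin' directory on sys.path")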
@@ -5,6 +5,25 @@
from serialization_utils import *
from import_dotnet import *
def sentiment_analysis_con_cat(inputDict):
    _str1 = ToString(inputDict['str1'])
    _str2 = ToString(inputDict['str2'])
    execResult = SentimentAnalysisCF.ConCat(_str1, _str2)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['out'] = execResultPy
    return outputDict

def sentiment_analysis_ena_cudna_funk(inputDict):
    _str1 = ToString(inputDict['str1'])
    _str2 = ToString(inputDict['str2'])
    _str3 = ToString(inputDict['str3'])
    _str4 = ToString(inputDict['str4'])
    execResult = SentimentAnalysisCF.EnaCudnaFunk(_str1, _str2, _str3, _str4)
    execResultPy = ToPyObj(execResult)
    outputDict = {}
    outputDict['out'] = execResultPy
    return outputDict
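Each of these library functions follows the ClowdFlows convention of taking a dictionary keyed by the widget's input ids and returning one keyed by its output ids. A short usage sketch with made-up values; it only runs when the .NET bridge above is loaded:

# Illustrative call only; the input strings are invented.
input_dict = {'str1': 'sentiment ', 'str2': 'analysis'}
output_dict = sentiment_analysis_con_cat(input_dict)
print(output_dict['out'])  # result of SentimentAnalysisCF.ConCat on the two strings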
def latino_flatten_object_to_string_array(inputDict):
    _data = ToNetObj(inputDict['data'])
@@ -271,7 +290,7 @@ def latino_construct_snowball_stemmer(inputDict):
    return outputDict

def latino_construct_stop_words_tagger(inputDict):
    _stopWords = ToString(inputDict['stopWords'])
    _stopWords = ToNetObj(inputDict['stopWords'])
    _ignoreCase = ToBool(inputDict['ignoreCase'])
    execResult = LatinoCF.ConstructStopWordsTagger(_stopWords, _ignoreCase)
    execResultPy = ToPyObj(execResult)
@@ -342,7 +361,7 @@ def latino_tag_string_stopwords(inputDict):
    outputDict['string'] = execResultPy
    return outputDict

def latino_construct_bow_space(inputDict):
def latino_construct_bow_space_1(inputDict):
    _documents = ToNetObj(inputDict['documents'])
    _tokenizer = ToNetObj(inputDict['tokenizer'])
    _stemmer = ToNetObj(inputDict['stemmer'])
@@ -359,7 +378,7 @@ def latino_construct_bow_space(inputDict):
    outputDict['ds'] = execResultPy['ds']
    return outputDict

def latino_construct_bow_space(inputDict):
def latino_construct_bow_space_2(inputDict):
    _adc = ToNetObj(inputDict['adc'])
    _tokenId = ToString(inputDict['tokenId'])
    _stemId = ToString(inputDict['stemId'])
@@ -474,7 +493,7 @@ def latino_sparse_matrix_to_table(inputDict):
def latino_construct_kmeans_clusterer(inputDict):
    _k = ToInt(inputDict['k'])
    _centroidType = ToEnum(Latino.Model.CentroidType, inputDict['centroidType'], Latino.Model.CentroidType.NrmL2)
    _similarityModel = ToEnum(LatinoCloudFlows.SimilarityModel, inputDict['similarityModel'], LatinoCloudFlows.SimilarityModel.Cosine)
    _similarityModel = ToEnum(LatinoClowdFlows.SimilarityModel, inputDict['similarityModel'], LatinoClowdFlows.SimilarityModel.Cosine)
    _randomSeed = ToInt(inputDict['randomSeed'])
    _eps = ToFloat(inputDict['eps'])
    _trials = ToInt(inputDict['trials'])
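The ToEnum helper (imported via serialization_utils/import_dotnet) apparently maps the string coming from the widget form onto a .NET enum member and falls back to the supplied default. Its implementation is not shown in this diff; a speculative sketch of its shape:

# Speculative sketch only; the real ToEnum is defined elsewhere and may differ.
def ToEnum(enum_type, value, default):
    try:
        return getattr(enum_type, str(value))
    except (AttributeError, TypeError):
        return default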
@@ -522,7 +541,7 @@ def latino_clustering_results_info(inputDict):
    return outputDict

def latino_construct_centroid_classifier(inputDict):
    _similarityModel = ToEnum(LatinoCloudFlows.SimilarityModel, inputDict['similarityModel'], LatinoCloudFlows.SimilarityModel.Cosine)
    _similarityModel = ToEnum(LatinoClowdFlows.SimilarityModel, inputDict['similarityModel'], LatinoClowdFlows.SimilarityModel.Cosine)
    _normalizeCentorids = ToBool(inputDict['normalizeCentorids'])
    execResult = LatinoCF.ConstructCentroidClassifier(_similarityModel, _normalizeCentorids)
    execResultPy = ToPyObj(execResult)
@@ -597,7 +616,7 @@ def latino_construct_maximum_entropy_classifier_fast(inputDict):
    return outputDict

def latino_construct_knn_classifier(inputDict):
    _similarityModel = ToEnum(LatinoCloudFlows.SimilarityModel, inputDict['similarityModel'], LatinoCloudFlows.SimilarityModel.Cosine)
    _similarityModel = ToEnum(LatinoClowdFlows.SimilarityModel, inputDict['similarityModel'], LatinoClowdFlows.SimilarityModel.Cosine)
    _k = ToInt(inputDict['k'])
    _softVoting = ToBool(inputDict['softVoting'])
    execResult = LatinoCF.ConstructKnnClassifier(_similarityModel, _k, _softVoting)
......
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
from collections import Counter
from optparse import make_option
from workflows.latino.latino import LatinoCF
from pprint import pprint
import sys
class Command(BaseCommand):
    args = 'file_name'
    help = 'Export all widgets marked in the C# dll package to a json file which can be imported into the mothra database'

    def handle(self, *args, **options):
        if len(args) < 1:
            raise CommandError('Argument "file_name" is required!')
        try:
            f = open(args[0], 'w')
        except:
            raise CommandError('There was a problem with creating/overwriting the given output file')
        self.stdout.write('Creating json representations of widgets, inputs, outputs, options and categories ... ')
        outstr = LatinoCF.GetJsonDbDefinitions()
        self.stdout.write('done.\n')
        try:
            f.write(outstr)
        except:
            raise CommandError('There was a problem with writing to the given output file')
        self.stdout.write('Json definitions successfully created. Results written to the file.\n')
\ No newline at end of file
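Like any Django management command, this is invoked under the name of the module that defines it, which this diff does not show. A hedged example using a placeholder command name:

# Placeholder command name; substitute the actual module name under
# workflows/management/commands/.
from django.core.management import call_command
call_command('export_latino_package', 'latino_widgets.json')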