katana-assistant code example
Example 1: katana-assistant
# Fit the model
model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
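Example 9 below loads the trained model back from katana-assistant-model.pkl, so a matching save step must have run after fitting. A minimal sketch of that step, assuming the model object pickles cleanly under this Keras version:

# Persist the fitted model so it can be reloaded for serving (see Example 9).
# Assumes this Keras/TF version produces a picklable model object.
import pickle

with open('katana-assistant-model.pkl', 'wb') as f:
    pickle.dump(model, f)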
Example 2: katana-assistant
# Compile model. Stochastic gradient descent with Nesterov accelerated
# gradient gives good results for this model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
Example 3: katana-assistant
# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons,
# and the 3rd (output) layer contains a number of neurons equal to the number
# of intents, to predict the output intent with softmax
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
Example 4: katana-assistant
# create our training data
training = []
# create an empty array for our output
output_empty = [0] * len(classes)
# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word - create the base word, in an attempt to represent related words
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    # create our bag-of-words array, with 1 if a word match is found in the current pattern
    for w in words:
        bag.append(1 if w in pattern_words else 0)
    # output is a '0' for each tag and '1' for the current tag (for each pattern)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
# shuffle our features and turn into np.array
# (note: on NumPy >= 1.24 the ragged [bag, output_row] pairs require an explicit
# dtype, i.e. np.array(training, dtype=object))
random.shuffle(training)
training = np.array(training)
# create train and test lists. X - patterns, Y - intents
train_x = list(training[:, 0])
train_y = list(training[:, 1])
Example 5: katana-assistant
45 documents
9 classes ['adverse_drug', 'blood_pressure', 'blood_pressure_search', 'goodbye', 'greeting', 'hospital_search', 'options', 'pharmacy_search', 'thanks']
82 unique stemmed words ["'s", ',', 'a', 'advers', 'al', 'anyon', 'ar', 'awesom', 'be', 'behavy', 'blood', 'by', 'bye', 'can', 'caus', 'chat', 'check', 'could', 'dat', 'day', 'detail', 'do', 'dont', 'drug', 'entry', 'find', 'for', 'giv', 'good', 'goodby', 'hav', 'hello', 'help', 'hi', 'hist', 'hospit', 'how', 'i', 'id', 'is', 'lat', 'list', 'load', 'loc', 'log', 'look', 'lookup', 'man', 'me', 'mod', 'nearby', 'next', 'nic', 'of', 'off', 'op', 'paty', 'pharm', 'press', 'provid', 'react', 'rel', 'result', 'search', 'see', 'show', 'suit', 'support', 'task', 'thank', 'that', 'ther', 'til', 'tim', 'to', 'transf', 'up', 'want', 'what', 'which', 'with', 'you']
Example 6: katana-assistant
classify_local('Fetch blood result for patient')
Example 7: katana-assistant
words = []
classes = []
documents = []
ignore_words = ['?']
# loop through each sentence in our intents patterns
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenize each word in the sentence
        w = nltk.word_tokenize(pattern)
        # add to our words list
        words.extend(w)
        # add to documents in our corpus
        documents.append((w, intent['tag']))
        # add to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
# stem and lower each word and remove duplicates
words = [stemmer.stem(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))
# sort classes
classes = sorted(list(set(classes)))
# documents = combination between patterns and intents
print(len(documents), "documents")
# classes = intents
print(len(classes), "classes", classes)
# words = all words, vocabulary
print(len(words), "unique stemmed words", words)
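The loop above reads from an intents dictionary that is not shown in these examples. A minimal, hypothetical sketch of the shape it assumes (the tags match the classes printed in Example 5, but the patterns here are illustrative, not the project's actual data):

# Hypothetical minimal intents structure with the fields the loop reads:
# a top-level "intents" list whose entries carry a "tag" and its "patterns".
intents = {
    "intents": [
        {
            "tag": "greeting",
            "patterns": ["Hi", "Hello", "Good day"]
        },
        {
            "tag": "blood_pressure_search",
            "patterns": ["Load blood pressure results for patient"]
        }
    ]
}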
Example 8: katana-assistant
def classify_local(sentence):
    ERROR_THRESHOLD = 0.25
    # generate probabilities from the model
    input_data = pd.DataFrame([bow(sentence, words)], dtype=float, index=['input'])
    results = model.predict([input_data])[0]
    # filter out predictions below a threshold, and provide intent index
    results = [[i, r] for i, r in enumerate(results) if r > ERROR_THRESHOLD]
    # sort by strength of probability
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        # return tuple of intent and probability
        return_list.append((classes[r[0]], str(r[1])))
    return return_list
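classify_local depends on a bow helper that is not included in these examples. A minimal sketch, assuming it mirrors the bag-of-words encoding used to build the training data in Example 4:

# Hypothetical bow helper: encodes a sentence against the stemmed vocabulary,
# applying the same tokenize + Lancaster-stem preprocessing as training.
def bow(sentence, words):
    sentence_words = [stemmer.stem(w.lower()) for w in nltk.word_tokenize(sentence)]
    # mark a 1 for every vocabulary word present in the sentence
    bag = [0] * len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return np.array(bag)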
Example 9: katana-assistant
# Use pickle to load in the pre-trained model
import tensorflow as tf  # TensorFlow 1.x API: get_default_graph

global graph
graph = tf.get_default_graph()
with open('katana-assistant-model.pkl', 'rb') as f:
    model = pickle.load(f)
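Capturing the default graph like this is the usual TensorFlow 1.x pattern when a loaded model is served from worker threads (for example behind Flask). A minimal sketch of how the captured graph would be used at prediction time, assuming that serving setup:

# Predictions must run under the graph that was current when the model was
# loaded; otherwise TF1 raises "Tensor ... is not an element of this graph"
# in multi-threaded servers.
with graph.as_default():
    predictions = classify_local('Fetch blood result for patient')
print(predictions)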
Example 10: katana-assistant
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
# things we need for Tensorflow
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import pandas as pd
import pickle
import random