Project: 2022-298 / Question chain chatbot code
Commit b825855e, authored May 13, 2022 by Vihanga Thathsara Pahalagamage
Commit message: Add new file
Parent: 3c1bdf44
1 changed file with 71 additions and 0 deletions

New file: Question chain chatbot code 3 (mode 100644)
import random
import json
import pickle

import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer

from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import gradient_descent_v2

lemmatizer = WordNetLemmatizer()

# Load the intent definitions from disk.
intents = json.loads(open('intents.json').read())

words = []
classes = []
documents = []
ignore_letters = ['?', '!', '.', ',']

# Tokenize every training pattern, record which tag it belongs to,
# and collect the overall vocabulary and the list of classes (tags).
for intent in intents['intents']:
    for pattern in intent['patterns']:
        word_list = nltk.word_tokenize(pattern)
        words.extend(word_list)
        documents.append((word_list, intent['tag']))
        if intent['tag'] not in classes:
            classes.append(intent['tag'])

# print(documents)

# Lemmatize the vocabulary, drop punctuation, and deduplicate it.
words = [lemmatizer.lemmatize(word) for word in words if word not in ignore_letters]
words = sorted(set(words))
# print(words)

# Persist the vocabulary and class list for the inference side of the bot.
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))

# Build the training set: one bag-of-words vector per pattern,
# paired with a one-hot vector marking its tag.
training = []
output_empty = [0] * len(classes)

for document in documents:
    word_patterns = [lemmatizer.lemmatize(word.lower()) for word in document[0]]
    bag = [1 if word in word_patterns else 0 for word in words]
    output_row = list(output_empty)
    output_row[classes.index(document[1])] = 1
    training.append([bag, output_row])

random.shuffle(training)
# dtype=object is required on recent NumPy versions because bag and
# output_row have different lengths.
training = np.array(training, dtype=object)

train_x = list(training[:, 0])
train_y = list(training[:, 1])

# A small feed-forward classifier: two dense hidden layers with dropout,
# softmax output over the intent classes.
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))

sgd = gradient_descent_v2.SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)

# save() only needs the path; the original passed hist as a second argument,
# where it was silently interpreted as the overwrite flag.
model.save('chatbotmodel.h5')
print("done")
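The script reads intents.json and expects an 'intents' list whose entries carry 'patterns' and a 'tag' (those key names come straight from the code above); it also relies on NLTK's punkt tokenizer and WordNet data being available. Below is a minimal setup sketch under those assumptions, with hypothetical example tags and an optional 'responses' field that this training script itself does not read.

import json
import nltk

# NLTK data required by nltk.word_tokenize and WordNetLemmatizer.
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')  # needed by the lemmatizer on some NLTK versions

# A minimal intents.json matching the keys the training script reads.
sample_intents = {
    "intents": [
        {
            "tag": "greeting",
            "patterns": ["Hi", "Hello", "Good morning"],
            "responses": ["Hello! How can I help you?"]
        },
        {
            "tag": "goodbye",
            "patterns": ["Bye", "See you later"],
            "responses": ["Goodbye!"]
        }
    ]
}

with open('intents.json', 'w') as f:
    json.dump(sample_intents, f, indent=2)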
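The saved artifacts (words.pkl, classes.pkl, chatbotmodel.h5) are presumably consumed by the chatbot's inference code elsewhere in this project. As a rough, illustrative sketch only (the bag_of_words and predict_class helpers below are not part of this commit), they could be loaded and queried like this, mirroring the preprocessing used during training.

import pickle

import numpy as np
import nltk
from nltk.stem import WordNetLemmatizer
from keras.models import load_model

lemmatizer = WordNetLemmatizer()

# Artifacts written by the training script above.
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model('chatbotmodel.h5')

def bag_of_words(sentence):
    # Same preprocessing as training: tokenize, lowercase, lemmatize,
    # then mark which vocabulary words occur in the sentence.
    tokens = [lemmatizer.lemmatize(w.lower()) for w in nltk.word_tokenize(sentence)]
    return np.array([1 if w in tokens else 0 for w in words])

def predict_class(sentence):
    # Feed the bag-of-words vector through the model and return the tag
    # with the highest predicted probability.
    probs = model.predict(np.array([bag_of_words(sentence)]))[0]
    return classes[int(np.argmax(probs))]

print(predict_class("Hello there"))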