Machine learning chatbot with Dialogflow, Jupyter Notebook, Flask, Angular, Node.js – part 2

Last time we enabled webhook for our intent.
This allows us to trigger our own methods/logic whenever an intent is completed.
In our case, when all the inputs are received from the user for titanic prediction then we want to execute our own custom method where we will pass these inputs to a backend service as a http request and want a prediction as a response.
This can be done with the fulfillment option of the dialogflow.
Now go to the Fulfillment option and enable the Inline editor. Currently we are doing this in the inline editor which the Dialogflow platform provides us. But later, we will build our own backend to do the same thing.
Additionally, we will install axios using package.json for making http request.
Hence our package.json will look like below:

{
  "name": "dialogflowFirebaseFulfillment",
  "description": "This is the default fulfillment for a Dialogflow agents using Cloud Functions for Firebase",
  "version": "0.0.1",
  "private": true,
  "license": "Apache Version 2.0",
  "author": "Google Inc.",
  "engines": {
    "node": "8"
  },
  "scripts": {
    "start": "firebase serve --only functions:dialogflowFirebaseFulfillment",
    "deploy": "firebase deploy --only functions:dialogflowFirebaseFulfillment"
  },
  "dependencies": {
    "actions-on-google": "^2.2.0",
    "firebase-admin": "^5.13.1",
    "firebase-functions": "^2.0.2",
    "dialogflow": "^0.6.0",
    "dialogflow-fulfillment": "^0.5.0",
    "axios": "^0.19.2"
  }
}

The main file is the index.js where we will implement our logic and methods.
Basically, we added a method/function named ‘getPrediction‘ and it will be called whenever ‘titanic.inputs‘ intent will be executed successfully.
getPrediction receives all the inputs from the dialogflow agent.
If you can recall our ML Titanic model, it allowed 1/2/3 numbers, hence we need to classify the inputs just like our model. Finally, we will call our Flask backend where we previously deployed our ML model. When we get a prediction result from the Flask backend, we will show whether the passenger would survive or not based on the response value.

index.js skeleton:


'use strict';
const functions = require('firebase-functions');
const {WebhookClient} = require('dialogflow-fulfillment');
const {Card, Suggestion} = require('dialogflow-fulfillment');
const axios = require('axios');

process.env.DEBUG = 'dialogflow:debug'; // enables lib debugging statements

exports.dialogflowFirebaseFulfillment = functions.https.onRequest((request, response) => {
  const agent = new WebhookClient({ request, response });
  console.log('Dialogflow Request headers: ' + JSON.stringify(request.headers));
  console.log('Dialogflow Request body: ' + JSON.stringify(request.body));
  function welcome(agent) {
    agent.add(`Welcome to my agent!`);
  function fallback(agent) {
    agent.add(`I didn't understand`);
    agent.add(`I'm sorry, can you try again?`);
  function getPrediction(agent) { 
    const name =,
          age = agent.parameters.age,
          gender = agent.parameters.gender,
          family_size = agent.parameters.family_size,
          ticket_class = agent.parameters.ticket_class,
          ticket_price = agent.parameters.ticket_price,
          embark = agent.parameters.embark;

    var title,ag,sex,fare,pclass,fsize,mbark;
    if(name.toLowerCase().includes("Mr")) title=0;
    else  if(name.toLowerCase().includes("Mrs")) title=1;
    else  if(name.toLowerCase().includes("Miss")) title=2;
    else  if(name.toLowerCase().includes("Master")) title=3;
    else title=4;
    if(age<=14) ag=0;
    else if(age>14 && age<=22) ag=1;
    else if(age>22 && age<=36) ag=2;
    else if(age>36 && age<=55) ag=3;
    else ag=4;
    sex= gender === 'male'?0:1;
    if(ticket_price.toLowerCase().startsWith("l")) fare=0;
    else if(ticket_price.toLowerCase().startsWith("m")) fare=1;
    else if(ticket_price.toLowerCase().startsWith("h")) fare=2;
    else fare=3;
    if(ticket_class==='1') pclass=1;
    else if(ticket_price==='2') pclass=2;
    else  pclass=3;
    if(family_size===1) fsize=0;
    else if(family_size<=3) fsize=1;
    else fsize=2;
    if(embark.toLowerCase().startsWith("s")) mbark=0;
    else if(embark.toLowerCase().startsWith("q")) mbark=1;
    else  mbark=1;
			.then(function (response) {
				agent.add(`According to the ML model, it seems you would survive if you were at titanic on that time.`);
			  else agent.add(`According to the ML model, it seems you wouldn't survive if you were at titanic on that time.`);

			.catch(function (error) {
			   agent.add(`I'm sorry,something went wrong`);

  // Run the proper function handler based on the matched Dialogflow intent name
  let intentMap = new Map();
  intentMap.set('Default Welcome Intent', welcome);
  intentMap.set('Default Fallback Intent', fallback);
  intentMap.set('titanic.inputs', getPrediction);

Deploy the files by clicking the deploy button from the bottom.
Now if we again go to ‘Try it now’ from the dialogflow then we will be able to get the prediction result from the server using the ML model.

In the next part, we will make our own backend.

Leave a Reply

Fill in your details below or click an icon to log in: Logo

You are commenting using your account. Log Out /  Change )

Google photo

You are commenting using your Google account. Log Out /  Change )

Twitter picture

You are commenting using your Twitter account. Log Out /  Change )

Facebook photo

You are commenting using your Facebook account. Log Out /  Change )

Connecting to %s