Can anyone check this simple chatbot application and suggest corrective measures?

This chatbot takes question–answer pairs from a CSV file and is supposed to return the stored answer for the user's question. The problem I am facing is that when the user asks any question, the bot prints tokenized/preprocessed text instead of the answer.

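For reference, qa_database.csv has no header row (I pass names= to read_csv), with one question–answer pair per line. These two rows are made up, but the real file has the same shape:

What is a chatbot?,A chatbot is a program that simulates human conversation.
How does a chatbot work?,It matches the user's question against a knowledge base and returns the closest answer.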
#Block 1 """ In this block we will import all the required libraries  """ import pandas as pd                      # importing pandas to read the csv file import nltk import re from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import TfidfVectorizer import random import string from sklearn.metrics.pairwise import cosine_similarity   #Block 2 """  This block will import csv file and save it to a variable   """  db_main = pd.read_csv(r'D:\Python\QnA_using_NLTK\qa_database.csv', sep = ',',                        names=["Question", "Answer", "user_response"])   # Block 3 """ Data Cleaning and preprocessing  """ #nltk.download('stopwords')    corpus = []             # corpus list is created to append output of initial cleaning of data  wordnet=WordNetLemmatizer() for i in range(0, len(db_main)):     review = re.sub('[^a-zA-Z0-9]', ' ', db_main['Question'][i])     review = review.lower()     review = review.split()     review = [wordnet.lemmatize(word) for word in review if not word in stopwords.words('english')]     review = ' '.join(review)     corpus.append(review)   #sent_tokens = nltk.sent_tokenize(db_main.Question)# converts to list of sentences  #word_tokens = nltk.word_tokenize(db_main.Question)# converts to list of words  #Block 4  """ This block will create tfidf vector and will create bag of words  """  # Creating the Bag of Words model cv = TfidfVectorizer() X = cv.fit_transform(corpus).toarray()  #Block 5 """ This block will define 2 functions"""  lemmer = WordNetLemmatizer() def LemTokens(tokens):     return [lemmer.lemmatize(token) for token in tokens] remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation) def LemNormalize(text):     return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))  #Block 6 """ this block will listdown user inputs and probable outputs  """  GREETING_INPUTS = ("hello", "hi", "greetings", "sup", "what's up","hey",) GREETING_RESPONSES = ["hi", "hey", "*nods*", "hi there", "hello", "I am glad! You are talking to me"] 

#Block 7

"""Checking for greetings """  def greeting(sentence):     """If user's input is a greeting, return a greeting response"""     for word in sentence.split():         if word.lower() in GREETING_INPUTS:             return random.choice(GREETING_RESPONSES)     # Block 8 """ Generating response """ def response(user_response):     robo_response=''     corpus.append(user_response)     TfidfVec = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')     tfidf = TfidfVec.fit_transform(corpus)     vals = cosine_similarity(tfidf[-1], tfidf)     idx=vals.argsort()[0][-2]     flat = vals.flatten()     flat.sort()     req_tfidf = flat[-2]     if(req_tfidf==0):         robo_response=robo_response+"I am sorry! I don't understand you"         return robo_response     else:         robo_response = robo_response+corpus[idx]         return robo_response   flag=True print("ROBO: My name is Robo. I will answer your queries about Chatbots. If you want to exit, type Bye!")  while(flag==True):     user_response = input()     user_response=user_response.lower()     if(user_response!='bye'):         if(user_response=='thanks' or user_response=='thank you' ):             flag=False             print("ROBO: You are welcome..")         else:             if(greeting(user_response)!=None):                 print("ROBO: "+greeting(user_response))             else:                 print("ROBO: ",end="")                 print(response(user_response))                 corpus.remove(user_response)     else:         flag=False         print("ROBO: Bye! take care..")