from random import choice

from nltk.tokenize import sent_tokenize, word_tokenize

# Split the source text into one quote per sentence.
with open("dixit.txt") as article:
    quotes = sent_tokenize(article.read())

text_in = ""
while text_in != "stop":
    text_in = input("> ")
    text_out = "> "
    # Tokenize the input and try the longest words first.
    words_in = word_tokenize(text_in)
    words_in.sort(key=len, reverse=True)
    quote_found = False
    for word in words_in:
        for quote in quotes:
            if word in quote:
                text_out += 'Speaking of %s, Einstein once said: "%s".' % (word, quote)
                quote_found = True
                break
        if quote_found:
            break
    # Fall back to a random quote when no word from the input matches.
    if not quote_found:
        text_out += choice(quotes)
    print(text_out)
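The script above assumes the NLTK Punkt tokenizer models are already installed; sent_tokenize and word_tokenize raise a LookupError otherwise. A minimal one-time setup sketch:

import nltk

# Fetch the Punkt sentence-tokenizer models used by sent_tokenize and word_tokenize.
nltk.download("punkt")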