import random

import nltk
from duckduckpy import query

# Earlier RSS experiment, kept commented out for reference:
# https://jcutrer.com/howto/dev/python/python-tutorial-howto-parse-rss-headlines
# import feedparser
# import wikipedia
# feed = feedparser.parse('http://feeds.feedburner.com/time/scienceandhealth?format=xml')
# print(feed['title'][0])
# print(feed['feed']['title'][0])
# print(type(feed))

# One-time NLTK data setup; uncomment on the first run:
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')


def singular_nouns(text):
    """Return the words tagged 'NN' (singular noun) in text."""
    tokens = nltk.word_tokenize(text)
    return [word for word, pos in nltk.pos_tag(tokens) if pos == 'NN']


sentence = "Encoded inside a microchip are the patterns of my fingertips"
print('\n')
print(sentence)

# Follow a three-step chain of DuckDuckGo lookups: pick a random noun from
# the current text, query it, and take the first related topic's text as
# the next link in the chain. The indentation (\t) deepens at each step.
text = sentence
for depth in range(1, 4):
    nouns = singular_nouns(text)
    print('\n')
    print(nouns)
    # random.choice avoids the randint(0, len-1) dance; note that both
    # raise an error if no 'NN' word was found in the text.
    word = random.choice(nouns)
    print(word)
    res = query(word, container='dict')
    text = res['related_topics'][0]['text']
    print('\n' + '\t' * depth + text)
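
# A hedged extension (not in the original script): for obscure query words,
# duckduckpy's 'related_topics' list can be empty, or contain grouped
# entries without a direct 'text' field, so indexing [0]['text'] may raise.
# Assuming the dict-shaped response the script already relies on, a more
# defensive lookup might look like this:
def first_related_topic(word):
    """Return the first related-topic text for word, or None if none exists."""
    res = query(word, container='dict')
    for topic in res.get('related_topics', []):
        if isinstance(topic, dict) and topic.get('text'):
            return topic['text']
    return None

# Example use inside the chain loop:
# text = first_related_topic(word) or text  # keep the old text on a miss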