# infra-ordinaire-no-func~20171217-110702.py (~1.5 KB)
# NOTE(review): the two lines above/below this header in the scraped copy were
# page furniture (file size, fused line numbers), not program text.
  1. import nltk
  2. #import feedparser
  3. #import wikipedia
  4. from duckduckpy import query
  5. import random
  6. #https://jcutrer.com/howto/dev/python/python-tutorial-howto-parse-rss-headlines
  7. #feed = feedparser.parse('http://feeds.feedburner.com/time/scienceandhealth?format=xml')
  8. #print(feed['title'][0])
  9. #print(feed['feed']['title'][0])
  10. #print (type(feed))
  11. sentence = "Encoded inside a microchip are the patterns of my fingertips"
  12. print('\n')
  13. print(sentence)
  14. tokens = nltk.word_tokenize(sentence)
  15. words = []
  16. for word, pos in nltk.pos_tag(tokens):
  17. if pos == 'NN':
  18. words.append(word)
  19. print('\n')
  20. print(words)
  21. list_len = len(words)
  22. ran_num = random.randint(0,list_len-1)
  23. print(ran_num)
  24. res = query(words[ran_num], container='dict')
  25. print('\n\t' + res['related_topics'][0]['text'])
  26. tokens = nltk.word_tokenize(res['related_topics'][0]['text'])
  27. words = []
  28. for word, pos in nltk.pos_tag(tokens):
  29. if pos == 'NN':
  30. words.append(word)
  31. print('\n')
  32. print(words)
  33. list_len = len(words)
  34. ran_num = random.randint(0,list_len-1)
  35. print(ran_num)
  36. res = query(words[ran_num], container='dict')
  37. print('\n\t\t' + res['related_topics'][0]['text'])
  38. tokens = nltk.word_tokenize(res['related_topics'][0]['text'])
  39. words = []
  40. for word, pos in nltk.pos_tag(tokens):
  41. if pos == 'NN':
  42. words.append(word)
  43. print('\n')
  44. print(words)
  45. list_len = len(words)
  46. ran_num = random.randint(0,list_len-1)
  47. print(ran_num)
  48. res = query(words[ran_num], container='dict')
  49. print('\n\t\t\t' + res['related_topics'][0]['text'])