Generate a novel using Markov chains

'''
markov.py - Generate a novel using Markov chains
Copyright (C) 2020 Blink The Things

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
import argparse
import numpy as np
import os
import spacy
parser = argparse.ArgumentParser(description='Generate a novel using Markov chains.')
parser.add_argument('word_file', help='file used for word selection')
parser.add_argument('pos_file', help='file used to build part-of-speech Markov chain')
parser.add_argument('-s', '--seed', type=int, help='seed for random number generator')
args = parser.parse_args()

nlp = spacy.load('en_core_web_sm')

# Fall back to a fixed default seed when --seed is not supplied
# (checking for None so that an explicit seed of 0 is still honored).
seed = args.seed if args.seed is not None else 12345
rng = np.random.default_rng(seed)
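
# Build the word pool: tag the word file with spaCy and group each token's
# text under its fine-grained part-of-speech tag (token.tag_), skipping
# whitespace, punctuation, and unclassified tokens.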
words_text = ''
with open(args.word_file, mode='r') as f:
    words_text = f.read()

words_doc = nlp(words_text)

words = {}
for sent in words_doc.sents:
    for token in sent:
        if token.pos_ in ('SPACE', 'PUNCT', 'X'):
            continue
        state = token.tag_
        word = token.text
        if state in words:
            words[state].append(word)
        else:
            words[state] = [word]
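
# Build the part-of-speech Markov chain from the second corpus: every
# sentence contributes a path of tag-to-tag edges running from a synthetic
# START state to a synthetic STOP state.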
pos_text = ''
with open(args.pos_file, mode='r') as f:
    pos_text = f.read()

pos_doc = nlp(pos_text)

edges = []
for sent in pos_doc.sents:
    curr_state = 'START'
    for token in sent:
        if token.pos_ in ('SPACE', 'PUNCT', 'X'):
            continue
        next_state = token.tag_
        edges.append((curr_state, next_state))
        curr_state = next_state
    edges.append((curr_state, 'STOP'))
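
# Count the outgoing edges of each state: 'cnt' is the total number of
# transitions leaving the state, and 'to' maps each destination state to
# how often it was reached.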
transitions = {}
for edge in edges:
    if edge[0] in transitions:
        transitions[edge[0]]['cnt'] += 1
        if edge[1] in transitions[edge[0]]['to']:
            transitions[edge[0]]['to'][edge[1]] += 1
        else:
            transitions[edge[0]]['to'][edge[1]] = 1
    else:
        transitions[edge[0]] = {'cnt': 1, 'to': {edge[1]: 1}}
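
# Normalize the counts into a sampling table: for each state, parallel
# lists of destination states and their transition probabilities.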
chain = {}
for key in transitions.keys():
    cnt = transitions[key]['cnt']
    choices = list(transitions[key]['to'])
    probs = []
    for choice in choices:
        probs.append(transitions[key]['to'][choice] / cnt)
    chain[key] = {'choices': choices, 'probs': probs}
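
# Generate ten sentences: random-walk the tag chain from START until STOP,
# drawing a concrete word for each sampled tag from the word pool. (This
# assumes every tag reachable in the chain also appears in the word pool;
# an unseen tag would raise a KeyError.)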
sents = []
for _ in range(10):
    choice = 'START'
    choices = []
    while True:
        next_choice = rng.choice(chain[choice]['choices'], p=chain[choice]['probs'])
        if choice == 'START' and next_choice == 'STOP':
            continue
        if next_choice == 'STOP':
            sents.append(' '.join(choices))
            break
        word = rng.choice(words[next_choice])
        choices.append(word)
        choice = next_choice

print(os.linesep.join(sents))
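
Usage sketch (the corpus file names below are placeholders, not files shipped with this repository): the script expects a file to draw words from, a file to build the part-of-speech chain from, and an optional seed. It also requires the spaCy model en_core_web_sm, installable with "python -m spacy download en_core_web_sm".

    python markov.py words.txt pos.txt --seed 42

The script prints ten generated sentences to standard output.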