diff --git a/examples/book_translation/translate_latex_book.ipynb b/examples/book_translation/translate_latex_book.ipynb
index 8f27b32..c68cdb8 100644
--- a/examples/book_translation/translate_latex_book.ipynb
+++ b/examples/book_translation/translate_latex_book.ipynb
@@ -90,7 +90,7 @@
    "source": [
     "It turns out that a double newline is a good separator in this case, in order not to break the flow of the text. Also no individual chunk is larger than 1500 tokens. The model we will use is text-davinci-002, which has a limit of 4096 tokens, so we don't need to worry about breaking the chunks down further.\n",
     "\n",
-    "We will group the shorter chunks into chunks of around 1000 tokens, to increase the coherence of the text, and the frequency of breaks within the text."
+    "We will group the shorter chunks into chunks of around 1000 tokens, to increase the coherence of the text, and decrease the frequency of breaks within the text."
    ]
   },
   {
diff --git a/examples/fine-tuned_qa/olympics-3-train-qa.ipynb b/examples/fine-tuned_qa/olympics-3-train-qa.ipynb
index b65db9c..c046e3c 100644
--- a/examples/fine-tuned_qa/olympics-3-train-qa.ipynb
+++ b/examples/fine-tuned_qa/olympics-3-train-qa.ipynb
@@ -228,7 +228,7 @@
     "\n",
     "This process is noisy, as sometimes the question might be answerable given a different context, but on average we hope this won't affect the peformance too much.\n",
     "\n",
-    "We apply the same process of dataset creation for both the discriminator, and the Q&A answering model. We apply the process separately for the training and testing set, to ensure that the examples from the traing set don't feature within the test set."
+    "We apply the same process of dataset creation for both the discriminator, and the Q&A answering model. We apply the process separately for the training and testing set, to ensure that the examples from the training set don't feature within the test set."
    ]
   },
   {
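For context on the first hunk: the corrected sentence describes merging double-newline-separated chunks into groups of roughly 1000 tokens. Below is a minimal sketch of that grouping step, not taken from the notebook itself: the greedy strategy, the group_chunks helper name, and the 1000-token budget are illustrative assumptions; it uses the tiktoken package with the p50k_base encoding, which corresponds to text-davinci-002.

# Sketch only -- not the notebook's actual implementation.
# Greedily merges paragraphs (split on double newlines) into groups of
# roughly `max_tokens` tokens, so no group breaks mid-paragraph.
import tiktoken

# p50k_base is the encoding family used by text-davinci-002.
enc = tiktoken.get_encoding("p50k_base")

def group_chunks(text: str, max_tokens: int = 1000) -> list[str]:
    """Merge double-newline-separated chunks up to ~max_tokens each."""
    groups: list[str] = []
    current: list[str] = []
    current_tokens = 0
    for chunk in text.split("\n\n"):
        n = len(enc.encode(chunk))
        # Start a new group if adding this chunk would exceed the budget.
        if current and current_tokens + n > max_tokens:
            groups.append("\n\n".join(current))
            current, current_tokens = [], 0
        current.append(chunk)
        current_tokens += n
    if current:
        groups.append("\n\n".join(current))
    return groups

Because no individual chunk exceeds 1500 tokens (per the notebook's text), each resulting group stays comfortably under the model's 4096-token limit once the prompt is added.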