diff --git a/python/markdown/.exercism/metadata.json b/python/markdown/.exercism/metadata.json new file mode 100644 index 0000000..cab27b5 --- /dev/null +++ b/python/markdown/.exercism/metadata.json @@ -0,0 +1 @@ +{"track":"python","exercise":"markdown","id":"4e53556c7e0a4cd4b08107bc26fdd6a3","url":"https://exercism.io/my/solutions/4e53556c7e0a4cd4b08107bc26fdd6a3","handle":"DmitryKokorin","is_requester":true,"auto_approve":false} \ No newline at end of file diff --git a/python/markdown/README.md b/python/markdown/README.md new file mode 100644 index 0000000..eae9836 --- /dev/null +++ b/python/markdown/README.md @@ -0,0 +1,58 @@ +# Markdown + +Refactor a Markdown parser. + +The markdown exercise is a refactoring exercise. There is code that parses a +given string with [Markdown +syntax](https://guides.github.com/features/mastering-markdown/) and returns the +associated HTML for that string. Even though this code is confusingly written +and hard to follow, somehow it works and all the tests are passing! Your +challenge is to re-write this code to make it easier to read and maintain +while still making sure that all the tests keep passing. + +It would be helpful if you made notes of what you did in your refactoring in +comments so reviewers can see that, but it isn't strictly necessary. The most +important thing is to make the code better! + + +## Exception messages + +Sometimes it is necessary to raise an exception. When you do this, you should include a meaningful error message to +indicate what the source of the error is. This makes your code more readable and helps significantly with debugging. Not +every exercise will require you to raise an exception, but for those that do, the tests will only pass if you include +a message. + +To raise a message with an exception, just write it as an argument to the exception type. 
"""README remainder, preserved from the mangled diff:

For example, instead of `raise Exception`, you should write:

    raise Exception("Meaningful message indicating the source of the error")

## Running the tests

To run the tests, run `pytest markdown_test.py`

Alternatively, you can tell Python to run the pytest module:
`python -m pytest markdown_test.py`

### Common `pytest` options

- `-v` : enable verbose output
- `-x` : stop running tests on first failure
- `--ff` : run failures from previous test before running other test cases

For other options, see `python -m pytest -h`

## Submitting Exercises

Note that, when trying to submit an exercise, make sure the solution is in the
`$EXERCISM_WORKSPACE/python/markdown` directory.

You can find your Exercism workspace by running `exercism debug` and looking
for the line that starts with `Workspace`.

For more detailed information about running tests, code style and linting,
please see [Running the Tests](http://exercism.io/tracks/python/tests).

## Submitting Incomplete Solutions

It's possible to submit an incomplete solution so you can see how others have
completed the exercise.
"""

# diff --git a/python/markdown/markdown.py b/python/markdown/markdown.py
# new file mode 100644
#
# NOTE(review): this file was recovered from a diff whose HTML tag literals
# ("<p>", "<h{level}>", "<ul>", "<li>", "<strong>", "<em>", ...) were stripped
# by a faulty text extraction, leaving unterminated string literals. The tags
# are restored below so the implementation matches the visible fragments
# (e.g. `level` is computed and would otherwise be unused) and satisfies
# every case in markdown_test.py.

import re


def parse(markdown):
    """Render a small subset of Markdown as a single HTML string.

    Supported syntax, line by line:
      * `# ...` .. `###### ...`  ->  <h1>..</h1> .. <h6>..</h6>
      * `__text__`               ->  <strong>text</strong>
      * `_text_`                 ->  <em>text</em>
      * `* item`                 ->  <li>item</li>, wrapped in one <ul>..</ul>
      * anything else            ->  <p>...</p>

    :param markdown: str - source text; lines are split on newlines.
    :return: str - concatenated HTML with no separators between elements.
    """
    res = ''
    in_list = False
    for line in markdown.splitlines():

        # Headers: 1-6 leading '#' followed by a space. Markdown symbols in
        # the remainder of a header line are NOT re-interpreted.
        match = re.match(r'^(#{1,6}) (.*)', line)
        if match:
            level = len(match.group(1))
            res += f'<h{level}>{match.group(2)}</h{level}>'
            continue

        # Inline emphasis. '__' must be substituted before '_' so that bold
        # markers are not half-consumed by the italic pattern.
        line = re.sub(r'__(.*)__', r'<strong>\1</strong>', line)
        line = re.sub(r'_(.*)_', r'<em>\1</em>', line)

        # List items: open one <ul> at the first item; subsequent items are
        # appended until a non-list line closes it below.
        match = re.match(r'\* (.*)', line)
        if match:
            if not in_list:
                in_list = True
                res += '<ul>'
            res += '<li>' + match.group(1) + '</li>'
            continue

        # A non-list line terminates any open list before the paragraph.
        if in_list:
            in_list = False
            res += '</ul>'

        res += '<p>' + line + '</p>'

    # Close a list that runs to the end of the input.
    if in_list:
        res += '</ul>'

    return res
# diff --git a/python/markdown/markdown_test.py b/python/markdown/markdown_test.py
# new file mode 100644
#
# NOTE(review): recovered from a mangled diff — the expected HTML strings in
# every assertion had their tags stripped by a faulty text extraction. They
# are restored here from the exercise's canonical data; the test names and
# the `parse(...)` inputs were fully intact and determine each expectation
# unambiguously.

import unittest

from markdown import parse

# Tests adapted from `problem-specifications//canonical-data.json`


class MarkdownTest(unittest.TestCase):
    def test_parses_normal_text_as_a_paragraph(self):
        self.assertEqual(
            parse("This will be a paragraph"), "<p>This will be a paragraph</p>"
        )

    def test_parsing_italics(self):
        self.assertEqual(
            parse("_This will be italic_"), "<p><em>This will be italic</em></p>"
        )

    def test_parsing_bold_text(self):
        self.assertEqual(
            parse("__This will be bold__"), "<p><strong>This will be bold</strong></p>"
        )

    def test_mixed_normal_italics_and_bold_text(self):
        self.assertEqual(
            parse("This will _be_ __mixed__"),
            "<p>This will <em>be</em> <strong>mixed</strong></p>",
        )

    def test_with_h1_header_level(self):
        self.assertEqual(parse("# This will be an h1"), "<h1>This will be an h1</h1>")

    def test_with_h2_header_level(self):
        self.assertEqual(parse("## This will be an h2"), "<h2>This will be an h2</h2>")

    def test_with_h6_header_level(self):
        self.assertEqual(
            parse("###### This will be an h6"), "<h6>This will be an h6</h6>"
        )

    def test_unordered_lists(self):
        self.assertEqual(
            parse("* Item 1\n* Item 2"), "<ul><li>Item 1</li><li>Item 2</li></ul>"
        )

    def test_with_a_little_bit_of_everything(self):
        self.assertEqual(
            parse("# Header!\n* __Bold Item__\n* _Italic Item_"),
            "<h1>Header!</h1><ul><li><strong>Bold Item</strong></li>"
            "<li><em>Italic Item</em></li></ul>",
        )

    def test_with_markdown_symbols_in_the_header_text_that_should_not_be_interpreted(
        self
    ):
        self.assertEqual(
            parse("# This is a header with # and * in the text"),
            "<h1>This is a header with # and * in the text</h1>",
        )

    def test_with_markdown_symbols_in_the_list_item_text_that_should_not_be_interpreted(
        self
    ):
        self.assertEqual(
            parse("* Item 1 with a # in the text\n* Item 2 with * in the text"),
            "<ul><li>Item 1 with a # in the text</li>"
            "<li>Item 2 with * in the text</li></ul>",
        )

    def test_with_markdown_symbols_in_the_paragraph_text_that_should_not_be_interpreted(
        self
    ):
        self.assertEqual(
            parse("This is a paragraph with # and * in the text"),
            "<p>This is a paragraph with # and * in the text</p>",
        )

    def test_unordered_lists_close_properly_with_preceding_and_following_lines(self):
        self.assertEqual(
            parse("# Start a list\n* Item 1\n* Item 2\nEnd a list"),
            "<h1>Start a list</h1><ul><li>Item 1</li><li>Item 2</li></ul>"
            "<p>End a list</p>",
        )


if __name__ == "__main__":
    unittest.main()