diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..c9953e2 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# These are supported funding model platforms + +ko_fi: alstr18858 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 440ed22..53d1400 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,8 +6,10 @@ jobs: build: runs-on: "ubuntu-latest" steps: - - uses: "actions/checkout@v3" - - uses: "actions/setup-python@v4" + - uses: "actions/checkout@v4" + - uses: "actions/setup-python@v5" + with: + python-version: '3.12' - name: "Install test dependencies" run: | python -m pip install --upgrade pip diff --git a/.github/workflows/todo.yml b/.github/workflows/todo.yml index 848a2ea..5b3b632 100644 --- a/.github/workflows/todo.yml +++ b/.github/workflows/todo.yml @@ -13,7 +13,7 @@ jobs: build: runs-on: "ubuntu-latest" steps: - - uses: "actions/checkout@v3" + - uses: "actions/checkout@v4" - name: "TODO to Issue" uses: "alstr/todo-to-issue-action@master" env: @@ -21,5 +21,3 @@ jobs: ${{ inputs.MANUAL_COMMIT_REF }} MANUAL_BASE_REF: ${{ inputs.MANUAL_BASE_REF }} - with: - PROJECTS_SECRET: ${{ secrets.PROJECTS_SECRET }} diff --git a/.gitignore b/.gitignore index 11ca46f..512caa4 100644 --- a/.gitignore +++ b/.gitignore @@ -139,4 +139,5 @@ dmypy.json # Cython debug symbols cython_debug/ - +# JetBrains +.idea \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 3bcfdde..0897921 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,7 +6,7 @@ RUN pip install --target=/app requests RUN pip install --target=/app -U pip setuptools wheel RUN pip install --target=/app ruamel.yaml -FROM gcr.io/distroless/python3-debian10 +FROM gcr.io/distroless/python3-debian12 COPY --from=builder /app /app WORKDIR /app ENV PYTHONPATH /app diff --git a/README.md b/README.md index 31119fd..21c0b43 100644 --- a/README.md +++ b/README.md @@ -1,54 +1,87 @@ -# TODO to Issue Action +# TODO to Issue -This action will convert newly committed TODO comments to GitHub issues on push. +

+ Latest release + Issues labelled 'help wanted' + Issues labelled 'good first issue' +

-Optionally, issues can also be closed when the TODOs are removed in a future commit. +Action to create, update and close issues based on committed TODO comments. -Action supports: +![Diagram showing how the action works](diagram.png) -* Multiple, customizable comments identifiers (FIXME, etc.), -* Configurable auto-labeling, -* Assignees, -* Milestones, -* Projects (classic). +Features: + +* Multiple, customisable comment identifiers (`FIXME`, etc.) +* Configurable auto-labeling +* Assignees +* Milestones +* Projects `todo-to-issue` works with almost any programming language. +## What's New + +v5 is the biggest release yet: + +* TODO reference handling +* Issue URL insertion +* Update and comment on existing issues +* Support for v2 projects +* Assign milestones by name +* Improved issue formatting +* Link issues to PRs + +See [Upgrading](#upgrading) for breaking changes. + ## Usage -Simply add a comment starting with TODO (or any other comment identifiers configured), followed by a colon and/or space. +Simply add a line or block comment starting with TODO (or any other comment identifiers configured), followed by a colon and/or space. Here's an example for Python creating an issue named after the TODO _description_: ```python - def hello_world(): - # TODO Come up with a more imaginative greeting - print('Hello world!') +def hello_world(): + # TODO: Come up with a more imaginative greeting + print('Hello world!') ``` _Multiline_ TODOs are supported, with additional lines inserted into the issue body: ```python - def hello_world(): - # TODO: Come up with a more imaginative greeting - # Everyone uses hello world and it's boring. - print('Hello world!') +def hello_world(): + # TODO: Come up with a more imaginative greeting + # Everyone uses hello world and it's boring. + print('Hello world!') ``` -As per the [Google Style Guide](https://google.github.io/styleguide/cppguide.html#TODO_Comments), you can provide a _reference_ after the TODO identifier. This will be included in the issue title for searchability. +As per the [Google Style Guide](https://google.github.io/styleguide/cppguide.html#TODO_Comments), you can provide a _reference_ after the TODO identifier: ```python - def hello_world(): - # TODO(alstr) Come up with a more imaginative greeting - # Everyone uses hello world and it's boring. - print('Hello world!') +def hello_world(): + # TODO(@alstr): Come up with a more imaginative greeting + # This will assign the issue to alstr. + print('Hello world!') + + # TODO(!urgent): This is wrong + # This will add an 'urgent' label. + assert 1 + 1 == 3 + + # TODO(#99): We need error handling here + # This will add the comment to the existing issue 99. + greeting_time = datetime.fromisoformat(date_string) + + # TODO(language): Localise this string + # This will prepend the reference to the issue title + dialogue = "TODO or not TODO, that is the question." ``` -Don't include parentheses within the reference itself. +Only one reference can be provided. Should you wish to further configure the issue, you can do so via +[TODO Options](#todo-options). ## TODO Options -A range of options can also be provided to apply to the new issue. +A range of options can also be provided to apply to the issue, in addition to any reference supplied. Options follow the `name: value` syntax. Unless otherwise specified, options should be on their own line, below the initial TODO declaration and 'body'. 
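A reference and several options can be combined in a single TODO. As an illustrative sketch (reusing example values that appear elsewhere in this README):

```python
def hello_world():
    # TODO(@alstr): Come up with a more imaginative greeting
    # Everyone uses hello world and it's boring.
    # labels: enhancement, help wanted
    # milestone: v3.0
    print('Hello world!')
```

Here the `@alstr` reference assigns the issue, while the `labels:` and `milestone:` lines are options as described below.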
@@ -58,11 +91,11 @@ Unless otherwise specified, options should be on their own line, below the initi Comma-separated list of usernames to assign to the issue: ```python - def hello_world(): - # TODO(alstr): Come up with a more imaginative greeting - # Everyone uses hello world and it's boring. - # assignees: alstr, bouteillerAlan, hbjydev - print('Hello world!') +def hello_world(): + # TODO: Come up with a more imaginative greeting + # Everyone uses hello world and it's boring. + # assignees: alstr, bouteillerAlan, hbjydev + print('Hello world!') ``` ### Labels @@ -70,171 +103,315 @@ Comma-separated list of usernames to assign to the issue: Comma-separated list of labels to add to the issue: ```python - def hello_world(): - # TODO(alstr): Come up with a more imaginative greeting - # Everyone uses hello world and it's boring. - # labels: enhancement, help wanted - print('Hello world!') +def hello_world(): + # TODO: Come up with a more imaginative greeting + # Everyone uses hello world and it's boring. + # labels: enhancement, help wanted + print('Hello world!') ``` If any of the labels do not already exist, they will be created. -The `todo` label is automatically added to issues to help the action efficiently retrieve them in the future. - ### Milestone -Milestone `ID` to assign to the issue: +Milestone name to assign to the issue: ```python - def hello_world(): - # TODO(alstr): Come up with a more imaginative greeting - # Everyone uses hello world and it's boring. - # milestone: 1 - print('Hello world!') +def hello_world(): + # TODO: Come up with a more imaginative greeting + # Everyone uses hello world and it's boring. + # milestone: v3.0 + print('Hello world!') ``` -Only a single milestone can be specified and it must already exist. - -### Projects - -_Please note, the action currently only supports classic user and organisation projects, and not 'new' projects._ - -With some additional setup, you can assign the created issues a status (column) within user or organisation projects. - -By default, the action cannot access your projects. To enable it, you must: - -* [Create a Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token), -* [Create an encrypted secret in your repo settings](https://docs.github.com/en/actions/reference/encrypted-secrets#creating-encrypted-secrets-for-a-repository), with the value set to the Personal Access Token, -* Assign the secret in the workflow file like `PROJECTS_SECRET: ${{ secrets.PROJECTS_SECRET }}`. _Do not enter the raw secret_. - -Projects are identified by their `full project name and issue status` (column) reference with the `/project name/status name` syntax. - -* To assign to a _user project_, use the `user projects:` option. -* To assign to an _organisation project_, use `org projects:` option. - -```python - def hello_world(): - # TODO Come up with a more imaginative greeting - # Everyone uses hello world and it's boring. - # user projects: alstr/Test User Project/To Do - # org projects: alstrorg/Test Org Project/To Do - print('Hello world!') -``` - -You can assign issues to multiple projects separating them with commas, i.e. `user projects: alstr/Test User Project 1/To Do, alstr/Test User Project 2/Tasks`. - -You can also specify `default projects` in the same way by defining `USER_PROJECTS` or `ORG_PROJECTS` in your workflow file. -These will be applied automatically to every issue, but will be overrode by any specified within the TODO. 
+Only a single milestone can be specified. If the milestone does not exist, it will be created. ## Supported Languages -- ABAP -- ABAP CDS -- AutoHotkey -- C -- C++ -- C# -- CSS -- Crystal -- Clojure -- Dart -- Elixir -- GDScript -- Go -- Handlebars -- HCL -- Haskell -- HTML -- Java -- JavaScript -- JSON5 -- JSON with Comments -- Julia -- Kotlin -- Less -- Markdown -- Nix -- Objective-C -- Org Mode -- PHP -- Python -- R -- Razor -- RMarkdown -- Ruby -- Rust -- Sass -- Scala -- SCSS -- Shell -- SQL -- Starlark -- Swift -- TeX -- TSX -- Twig -- TypeScript -- Visual Basic for Applications (VBA) -- Vue -- XML -- YAML +- ABAP +- ABAP CDS +- Agda +- AutoHotkey +- C +- C++ +- C# +- CSS +- Crystal +- Clojure +- Cuda +- Dart +- Elixir +- GDScript +- Go +- Handlebars +- HCL +- Haskell +- HTML +- Java +- JavaScript +- JSON5 +- JSON with Comments +- Julia +- Kotlin +- Less +- Liquid +- Makefile +- Markdown +- Nix +- Objective-C +- Org Mode +- PHP +- Python +- PureScript +- R +- Razor +- RMarkdown +- Ruby +- Rust +- Sass +- Scala +- SCSS +- Shell +- SQL +- Starlark +- Swift +- TeX +- TSX +- Twig +- TypeScript +- Visual Basic for Applications (VBA) +- Vue +- XML +- YAML -New languages can easily be added to the `syntax.json` file, used by the action to identify TODO comments. +New languages can easily be added to the `syntax.json` file used by the action to identify TODO comments. -When adding languages, follow the structure of existing entries, and use the language name defined by GitHub in [`languages.yml`](https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml). - -Of course, PRs adding new languages are welcome and appreciated. Please add a test for your language in order for your PR to be accepted. See [Contributing](#contributing--issues). +PRs adding new languages are welcome and appreciated. See [Contributing](#contributing--issues). ## Setup -Create a `workflow.yml` file in your `.github/workflows` directory like: +In the repo where you want the action to run, go to `Settings -> Actions (General) -> Workflow permissions` and enable +"Read and write permissions". + +Next, create a `workflow.yml` file in your `.github/workflows` directory: ```yml name: "Run TODO to Issue" -on: ["push"] +on: [ "push" ] jobs: - build: - runs-on: "ubuntu-latest" - steps: - - uses: "actions/checkout@v3" - - name: "TODO to Issue" - uses: "alstr/todo-to-issue-action@v4" + build: + runs-on: "ubuntu-latest" + steps: + - uses: "actions/checkout@v4" + - name: "TODO to Issue" + uses: "alstr/todo-to-issue-action@v5" ``` -See [Github's workflow syntax](https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions) for further details on this file. +### URL Insertion -The workflow file takes the following optional inputs: +The action can insert the URL for a created issue back into the associated TODO. -| Input | Required | Description | -| ---------------- | -------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `CLOSE_ISSUES` | No | Optional boolean input that specifies whether to attempt to close an issue when a TODO is removed. Default: `true`. 
| -| `AUTO_P` | No | Optional boolean input that specifies whether to format each line in multiline TODOs as a new paragraph. Default: `true`. | -| `IGNORE` | No | Optional string input that provides comma-delimited regular expressions that match files in the repo that we should not scan for TODOs. By default, we will scan all files. | -| `AUTO_ASSIGN` | No | Optional boolean input that specifies whether to assign the newly created issue to the user who triggered the action. If users are manually assigned to an issue, this setting is ignored. Default: `false`. | -| `ISSUE_TEMPLATE` | No | You can override the default issue template by providing your own here. Markdown is supported, and you can inject the issue title, body, code URL and snippet. Example: `"This is my issue title: **{{ title }}**\n\nThis is my issue body: **{{ body }}**\n\nThis is my code URL: **{{ url }}**\n\nThis is my snippet:\n\n{{ snippet }}"` | -| `IDENTIFIERS` | No | A list of dictionaries specifying the identifiers for the action to recognise. `TODO` is the default, but you can override this here, and specify default labels to be applied when creating issues for each identifier. JSON string must be valid with double quoted keys/values and itself single-quoted (or double-quoted and escaped). Example: `'[{"name": "TODO", "labels": ["help wanted"]}, {"name": "FIXME", "labels": ["bug"]}]'` (`labels` should be an empty list if no default labels are wanted) | +This allows for tighter integration between issues and TODOs, enables updating issues by editing TODOs, and improves the +accuracy of the action when closing TODOs. -These can be specified using `with` parameter in the workflow file, as below: +A new feature in v5, it is disabled by default. To enable URL insertion, some extra config is required: ```yml name: "Run TODO to Issue" -on: ["push"] +on: [ "push" ] jobs: - build: - runs-on: "ubuntu-latest" - steps: - - uses: "actions/checkout@v3" - - name: "TODO to Issue" - uses: "alstr/todo-to-issue-action@v4" - with: - AUTO_ASSIGN: true + build: + runs-on: "ubuntu-latest" + steps: + - uses: "actions/checkout@v4" + - name: "TODO to Issue" + uses: "alstr/todo-to-issue-action@v5" + with: + INSERT_ISSUE_URLS: "true" + - name: Set Git user + run: | + git config --global user.name "github-actions[bot]" + git config --global user.email "github-actions[bot]@users.noreply.github.com" + - name: Commit and Push Changes + run: | + git add . + git commit -m "Automatically added GitHub issue links to TODOs" + git push origin main ``` -### Considerations +You will probably also want to use the setting `CLOSE_ISSUES: "true"`, to allow issues to be closed when a TODO is +removed. -- TODOs are found by analysing the difference between the new commit and its previous one (i.e., the diff). That means that if this action is implemented during development, any existing TODOs will not be detected. For them to be detected, you would have to remove them, commit, put them back, and commit again, or [run the action manually](#running-the-action-manually). -- Should you change the TODO text, this will currently create a new issue. -- Closing TODOs is still somewhat experimental. +Please note, URL insertion works best with line comments, as it has to insert a line into the file. If using block +comments, you should put the start and end tags on their own lines. This may be improved in the future. + +This feature is not perfect. Please make sure you're comfortable with that before enabling. 
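As a rough sketch of what to expect with `INSERT_ISSUE_URLS` enabled, the action adds an `Issue URL:` comment line pointing at the issue it created from the TODO. The URL and its exact placement below are illustrative only:

```python
def hello_world():
    # TODO: Come up with a more imaginative greeting
    # Issue URL: https://github.com/alstr/todo-to-issue-action/issues/123
    # Everyone uses hello world and it's boring.
    print('Hello world!')
```

On later runs the action recognises this `Issue URL:` line, which is what allows it to update or close the correct issue when the TODO is edited or removed.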
+ +### Projects + +You can configure the action to add newly created issues to a specified v2 project (i.e., not a classic project). + +The action does not have sufficient permissions by default, so you will need to create a new Personal Access Token with +the `repo` and `project` scopes. + +Then, in your repo, go to `Settings -> Secrets and variables (Actions) -> Secrets`, and enter the value as a new +repository secret with the name `PROJECTS_SECRET`. + +Finally, add the following to the workflow file, under `with`: +``` +PROJECT: "user/alstr/test" +PROJECTS_SECRET: "${{ secrets.PROJECTS_SECRET }}" +``` + +Where `PROJECT` is a string of the form `account_type/owner/project_name`. Valid values for `account_type` are `user` or `organization`. + +All newly created issues will then be automatically added to the specified project. + +### Custom Languages + +If you want to add language definitions that are not currently supported, or overwrite existing ones, you can do so +using the `LANGUAGES` input. + +Just create a file that contains an array of languages, each with the following properties: + +| Property | Description | +|------------|----------------------------------------------------------------------------------------------------------------------------------------------------| +| language | The unique name of the language | +| extensions | A list of file extensions for the custom language | +| markers | A list of objects (see example below) to declare the comment markers. Make sure to escape all special Markdown characters with a double backslash. | + +For example, here is a language declaration file for Java: + +```json +[ + { + "language": "Java", + "extensions": [ + ".java" + ], + "markers": [ + { + "type": "line", + "pattern": "//" + }, + { + "type": "block", + "pattern": { + "start": "/\\*", + "end": "\\*/" + } + } + ] + } +] +``` + +Next, add the file to the `LANGUAGES` property in your workflow file. + +**Using a Local File:** + +`LANGUAGES: "path/to/my/file.json"` + +**Using a Remote File:** + +`LANGUAGES: "https://myserver.com/path/to/my/file.json"` + +Multiple paths can be provided by entering a comma-delimited string. + +### All Settings + +The workflow file takes the following optional inputs, specified under the `with` parameter: + +#### AUTO_ASSIGN + +Automatically assign new issues to the user who triggered the action. + +Default: `False` + +#### AUTO_P + +For multiline TODOs, format each line as a new paragraph when creating the issue. + +Default: `True` + +#### CLOSE_ISSUES + +Whether to close an issue when a TODO is removed. If enabling this, also enabling `INSERT_ISSUE_URLS` is recommended +for improved accuracy. + +Default: `False` + +#### ESCAPE + +Escape all special Markdown characters. + +Default: `True` + +#### GITHUB_URL + +Base URL of GitHub API. In most cases you will not need to change this. + +Default: `${{ github.api_url }}` + +#### IDENTIFIERS + +List of custom identifier dictionaries. Use this to add support for `FIXME` and other identifiers, and assign default +labels. + +Default: `[{"name": "TODO", "labels": []}]` + +#### INSERT_ISSUE_URLS + +Whether to insert the URL for a new issue back into the associated TODO. + +See [URL Insertion](#url-insertion). + +Default: `False` + +#### IGNORE + +A collection of comma-delimited regular expressions that match files that should be ignored when searching for TODOs. + +#### ISSUE_TEMPLATE + +Custom template used to format new issues. 
This is a string that accepts Markdown, linebreaks and the following +placeholders: + +* `{{ title }}`: issue title +* `{{ body }}`: issue body +* `{{ url }}`: URL to the line +* `{{ snippet }}`: code snippet of the relevant section + +If not specified the standard template is used, containing the issue body (if a multiline TODO), URL and snippet. + +#### LANGUAGES + +A collection of comma-delimited URLs or local paths (starting from the current working directory of the action) +for custom languages. + +See [Custom Languages](#custom-languages). + +#### NO_STANDARD + +Exclude loading the default `syntax.json` and `languages.yml` files. + +Default: `False` + +#### PROJECT + +A string specifying a v2 project where issues should be added. + +Use the format `account_type/owner/project_name`. Valid values for `account_type` are `user` or `organization`. + +See [Projects](#projects). + +#### PROJECTS_SECRET + +A Personal Access Token with the `repo` and `project` scopes, required for enabling support for projects. + +It should be of the form `${{ secrets.PROJECTS_SECRET }}`. Do not enter actual secret. + +See [Projects](#projects). ## Running the action manually @@ -245,51 +422,70 @@ You can run the action manually by adding support for the `workflow_dispatch` ev ```yaml name: "Run TODO to Issue" on: - push: - workflow_dispatch: - inputs: - MANUAL_COMMIT_REF: - description: "The SHA of the commit to get the diff for" - required: true - MANUAL_BASE_REF: - description: "By default, the commit entered above is compared to the one directly before it; to go back further, enter an earlier SHA here" - required: false + push: + workflow_dispatch: + inputs: + MANUAL_COMMIT_REF: + description: "The SHA of the commit to get the diff for" + required: true + MANUAL_BASE_REF: + description: "By default, the commit entered above is compared to the one directly before it; to go back further, enter an earlier SHA here" + required: false jobs: - build: - runs-on: "ubuntu-latest" - steps: - - uses: "actions/checkout@v3" - - name: "TODO to Issue" - uses: "alstr/todo-to-issue-action@master" - env: - MANUAL_COMMIT_REF: ${{ inputs.MANUAL_COMMIT_REF }} - MANUAL_BASE_REF: ${{ inputs.MANUAL_BASE_REF }} + build: + runs-on: "ubuntu-latest" + steps: + - uses: "actions/checkout@v4" + - name: "TODO to Issue" + uses: "alstr/todo-to-issue-action@v5" + env: + MANUAL_COMMIT_REF: ${{ inputs.MANUAL_COMMIT_REF }} + MANUAL_BASE_REF: ${{ inputs.MANUAL_BASE_REF }} ``` -Head to the Actions section of your repo, select the workflow and then 'Run workflow'. +Head to the actions section of your repo, select the workflow and then 'Run workflow'. -You can run the workflow for a single commit by entering the commit SHA in the first box. In this case, the action will compare the commit to the one directly before it. +You can run the workflow for a single commit by entering the commit SHA in the first box. In this case, the action will +compare the commit to the one directly before it. -You can also compare a broader range of commits. For that, also enter the 'from'/base commit SHA in the second box. +You can also compare a broader range of commits. For that, also enter the 'from' or base commit SHA in the second box. + +## Upgrading + +If upgrading from v4 to v5, please note the following: + +* Milestones are now specified by name, not ID. +* Support for classic projects has been removed, together with the `user_projects:` and `org_projects:` options, + and `USER_PROJECTS` and `ORG_PROJECTS` workflow settings. 
+* The `todo` label is no longer set on created issues. ## Troubleshooting ### No issues have been created -- Make sure your file language is in `syntax.json`. -- The action will not recognise existing TODOs that have already been pushed, unless you [run the action manually](#running-the-action-manually). -- If a similar TODO appears in the diff as both an addition and deletion, it is assumed to have been moved, so is ignored. -- If your workflow is executed but no issue is generated, check your repo permissions by navigating to `Settings -> Actions (General) -> Workflow permissions` and enable "Read and write permissions". +- Make sure your file language is in `syntax.json`. +- TODOs are found by analysing the difference between the new commit and its previous one (i.e., the diff). This means + that if this action is implemented during development, any existing TODOs will not be detected. For them to be + detected, you would have to re-commit them, or [run the action manually](#running-the-action-manually). +- If your workflow is executed but no issue is generated, check your repo permissions by navigating to + `Settings -> Actions (General) -> Workflow permissions` and enable "Read and write permissions". ### Multiple issues have been created -Issues are created whenever the action runs and finds a newly added TODO in the diff. Rebasing may cause a TODO to show up in a diff multiple times. This is an acknowledged issue, but you may have some luck by adjusting your workflow file. +Issues are created whenever the action runs and finds a newly added TODO in the diff. This can lead to duplicate +issues if a diff is processed multiple times. + +Enabling [URL Insertion](#url-insertion) can help with the detection of existing issues. ## Contributing & Issues -If you do encounter any problems, please file an issue or submit a PR. Everyone is welcome and encouraged to contribute. +If encounter any problems, please file an issue or submit a PR. Everyone is welcome and encouraged to contribute. -**If submitting a request to add a new language, please ensure you add the appropriate tests covering your language. In the interests of stability, PRs without tests cannot be considered.** +**If submitting a request to add a new language, please ensure you add the appropriate tests covering your language. +In the interests of stability, PRs without tests cannot be considered.** + +When adding languages, follow the structure of existing entries, and use the language name defined by +[GitHub's `languages.yml`](https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml) file. ## Running tests locally @@ -301,18 +497,25 @@ python -m unittest ## Customising -If you want to fork this action to customise its behaviour, there are a few steps you should take to ensure your changes run: +If you want to fork this action to customise its behaviour, there are a few steps you should take to ensure your changes +run: -- In `workflow.yml`, set `uses: ` to your action. -- In `action.yml`, set `image: ` to `Dockerfile`, rather than the prebuilt image. -- If customising `syntax.json`, you will want to update the URL in `main.py` to target your version of the file. +- In `workflow.yml`, set `uses: ` to your action. +- In `action.yml`, set `image: ` to `Dockerfile`, rather than the prebuilt image. +- If customising `syntax.json`, you will want to update the URL in `main.py` to target your version of the file. ## Thanks -The action was developed for the GitHub Hackathon. 
Whilst every effort is made to ensure it works, it comes with no guarantee. +The action was originally developed for the GitHub Hackathon in 2020. Whilst every effort is made to ensure it works, +it comes with no guarantee. -Thanks to Jacob Tomlinson for [his handy overview of GitHub Actions](https://www.jacobtomlinson.co.uk/posts/2019/creating-github-actions-in-python/). - -Thanks to GitHub's [linguist repo](https://github.com/github/linguist/) for the [`languages.yml`](https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml) file used by the app to look up file extensions and determine the correct highlighting to apply to code snippets. +Thanks to GitHub's [linguist repo](https://github.com/github/linguist/) for the [`languages.yml`](https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml) file used by the app to look up file extensions +and determine the correct highlighting to apply to code snippets. Thanks to all those who have [contributed](https://github.com/alstr/todo-to-issue-action/graphs/contributors) to the further development of this action. + +## Supporting the Project + +If you’ve found this action helpful and it has made your workflow easier, please consider buying a coffee to help keep it going. Thank you in advance! + +[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/alstr18858) diff --git a/action.yml b/action.yml index 8ab5762..fa4c08b 100644 --- a/action.yml +++ b/action.yml @@ -3,7 +3,7 @@ description: 'Converts IDE TODO comments to GitHub issues' author: 'Alastair Mooney' runs: using: 'docker' - image: 'docker://ghcr.io/alstr/todo-to-issue-action:v4.10.1' + image: 'docker://ghcr.io/alstr/todo-to-issue-action:v5.0' branding: icon: 'check-square' color: 'orange' @@ -17,7 +17,7 @@ inputs: required: false default: '${{ github.event.before || github.base_ref }}' COMMITS: - description: 'An array of commit objects describing the pushed commits' + description: 'An array of commit objects describing the pushed commits (automatically set)' required: false default: '${{ toJSON(github.event.commits) }}' DIFF_URL: @@ -32,29 +32,20 @@ inputs: description: 'The GitHub access token to allow us to retrieve, create and update issues (automatically set)' required: false default: ${{ github.token }} - LABEL: - description: 'The label that will be used to identify TODO comments (deprecated)' - required: false - COMMENT_MARKER: - description: 'The marker used to signify a line comment in your code (deprecated)' - required: false CLOSE_ISSUES: - description: 'Optional input that specifies whether to attempt to close an issue when a TODO is removed' + description: 'Optional input specifying whether to attempt to close an issue when a TODO is removed' required: false default: true AUTO_P: description: 'For multiline TODOs, format each line as a new paragraph when creating the issue' required: false default: true + PROJECT: + description: "User or organization project to link issues to, format 'project_type/owner/project_name'" + required: false PROJECTS_SECRET: description: 'Encrypted secret corresponding to your personal access token (do not enter the actual secret)' required: false - USER_PROJECTS: - description: 'Default user projects' - required: false - ORG_PROJECTS: - description: 'Default organisation projects' - required: false IGNORE: description: 'A collection of comma-delimited regular expression that matches files that should be ignored when searching for TODOs' required: false @@ -63,7 +54,7 @@ 
inputs: required: false default: false ACTOR: - description: 'The username of the person who triggered the action' + description: 'The username of the person who triggered the action (automatically set)' required: false default: '${{ github.actor }}' ISSUE_TEMPLATE: @@ -75,4 +66,19 @@ inputs: GITHUB_URL: description: 'Base url of GitHub API' required: false - default: ${{ github.api_url }} \ No newline at end of file + default: ${{ github.api_url }} + ESCAPE: + description: 'Escape all special Markdown characters' + required: false + default: true + LANGUAGES: + description: 'A collection of comma-delimited URLs or local paths for custom language files' + required: false + NO_STANDARD: + description: "Exclude loading the default 'syntax.json' and 'languages.yml' files from the repository" + required: false + default: false + INSERT_ISSUE_URLS: + description: 'Whether the action should insert the URL for a newly-created issue into the associated TODO comment' + required: false + default: false \ No newline at end of file diff --git a/diagram.png b/diagram.png new file mode 100644 index 0000000..4c9f052 Binary files /dev/null and b/diagram.png differ diff --git a/main.py b/main.py index 0558149..a9155eb 100644 --- a/main.py +++ b/main.py @@ -11,6 +11,8 @@ from ruamel.yaml import YAML from enum import Enum import itertools import operator +from collections import defaultdict +from urllib.parse import urlparse class LineStatus(Enum): @@ -23,26 +25,29 @@ class LineStatus(Enum): class Issue(object): """Basic Issue model for collecting the necessary info to send to GitHub.""" - def __init__(self, title, labels, assignees, milestone, user_projects, org_projects, body, hunk, file_name, - start_line, markdown_language, status, identifier): + def __init__(self, title, labels, assignees, milestone, body, hunk, file_name, + start_line, num_lines, markdown_language, status, identifier, ref, issue_url, issue_number): self.title = title self.labels = labels self.assignees = assignees self.milestone = milestone - self.user_projects = user_projects - self.org_projects = org_projects self.body = body self.hunk = hunk self.file_name = file_name self.start_line = start_line + self.num_lines = num_lines self.markdown_language = markdown_language self.status = status self.identifier = identifier + self.ref = ref + self.issue_url = issue_url + self.issue_number = issue_number class GitHubClient(object): - """Basic client for getting the last diff and creating/closing issues.""" + """Basic client for getting the last diff and managing issues.""" existing_issues = [] + milestones = [] def __init__(self): self.github_url = os.getenv('INPUT_GITHUB_URL') @@ -55,52 +60,98 @@ class GitHubClient(object): self.diff_url = os.getenv('INPUT_DIFF_URL') self.token = os.getenv('INPUT_TOKEN') self.issues_url = f'{self.repos_url}{self.repo}/issues' + self.milestones_url = f'{self.repos_url}{self.repo}/milestones' self.issue_headers = { 'Content-Type': 'application/json', - 'Authorization': f'token {self.token}' + 'Authorization': f'token {self.token}', + 'X-GitHub-Api-Version': '2022-11-28' + } + self.graphql_headers = { + 'Authorization': f'Bearer {os.getenv("INPUT_PROJECTS_SECRET", "")}', + 'Accept': 'application/vnd.github.v4+json' } auto_p = os.getenv('INPUT_AUTO_P', 'true') == 'true' self.line_break = '\n\n' if auto_p else '\n' - # Retrieve the existing repo issues now so we can easily check them later. 
- self._get_existing_issues() self.auto_assign = os.getenv('INPUT_AUTO_ASSIGN', 'false') == 'true' self.actor = os.getenv('INPUT_ACTOR') - - def get_timestamp(self, commit): - return commit.get('timestamp') + self.insert_issue_urls = os.getenv('INPUT_INSERT_ISSUE_URLS', 'false') == 'true' + if self.base_url == 'https://api.github.com/': + self.line_base_url = 'https://github.com/' + else: + self.line_base_url = self.base_url + self.project = os.getenv('INPUT_PROJECT', None) + # Retrieve the existing repo issues now so we can easily check them later. + self._get_existing_issues() + # Populate milestones so we can perform a lookup if one is specified. + self._get_milestones() def get_last_diff(self): """Get the last diff.""" if self.diff_url: - # Diff url was directly passed in config, likely due to this being a PR + # Diff url was directly passed in config, likely due to this being a PR. diff_url = self.diff_url elif self.before != '0000000000000000000000000000000000000000': - # There is a valid before SHA to compare with, or this is a release being created + # There is a valid before SHA to compare with, or this is a release being created. diff_url = f'{self.repos_url}{self.repo}/compare/{self.before}...{self.sha}' elif len(self.commits) == 1: - # There is only one commit + # There is only one commit. diff_url = f'{self.repos_url}{self.repo}/commits/{self.sha}' else: - # There are several commits: compare with the oldest one - oldest = sorted(self.commits, key=self.get_timestamp)[0]['id'] + # There are several commits: compare with the oldest one. + oldest = sorted(self.commits, key=self._get_timestamp)[0]['id'] diff_url = f'{self.repos_url}{self.repo}/compare/{oldest}...{self.sha}' diff_headers = { 'Accept': 'application/vnd.github.v3.diff', - 'Authorization': f'token {self.token}' + 'Authorization': f'token {self.token}', + 'X-GitHub-Api-Version': '2022-11-28' } diff_request = requests.get(url=diff_url, headers=diff_headers) if diff_request.status_code == 200: return diff_request.text raise Exception('Could not retrieve diff. 
Operation will abort.') + # noinspection PyMethodMayBeStatic + def _get_timestamp(self, commit): + """Get a commit timestamp.""" + return commit.get('timestamp') + + def _get_milestones(self, page=1): + """Get all the milestones.""" + params = { + 'per_page': 100, + 'page': page, + 'state': 'open' + } + milestones_request = requests.get(self.milestones_url, headers=self.issue_headers, params=params) + if milestones_request.status_code == 200: + self.milestones.extend(milestones_request.json()) + links = milestones_request.links + if 'next' in links: + self._get_milestones(page + 1) + + def _get_milestone(self, title): + """Get the milestone number for the one with this title (creating one if it doesn't exist).""" + for m in self.milestones: + if m['title'] == title: + return m['number'] + else: + return self._create_milestone(title) + + def _create_milestone(self, title): + """Create a new milestone with this title.""" + milestone_data = { + 'title': title + } + milestone_request = requests.post(self.milestones_url, headers=self.issue_headers, json=milestone_data) + return milestone_request.json()['number'] if milestone_request.status_code == 201 else None + def _get_existing_issues(self, page=1): """Populate the existing issues list.""" params = { 'per_page': 100, 'page': page, - 'state': 'open', - 'labels': 'todo' + 'state': 'open' } list_issues_request = requests.get(self.issues_url, headers=self.issue_headers, params=params) if list_issues_request.status_code == 200: @@ -109,14 +160,110 @@ class GitHubClient(object): if 'next' in links: self._get_existing_issues(page + 1) + def _get_project_id(self, project): + """Get the project ID.""" + project_type, owner, project_name = project.split('/') + if project_type == 'user': + query = """ + query($owner: String!) { + user(login: $owner) { + projectsV2(first: 10) { + nodes { + id + title + } + } + } + } + """ + elif project_type == 'organization': + query = """ + query($owner: String!) { + organization(login: $owner) { + projectsV2(first: 10) { + nodes { + id + title + } + } + } + } + """ + else: + print("Invalid project type") + return None + + variables = { + 'owner': owner, + } + project_request = requests.post('https://api.github.com/graphql', + json={'query': query, 'variables': variables}, + headers=self.graphql_headers) + if project_request.status_code == 200: + projects = (project_request.json().get('data', {}).get(project_type, {}).get('projectsV2', {}) + .get('nodes', [])) + for project in projects: + if project['title'] == project_name: + return project['id'] + return None + + def _get_issue_global_id(self, owner, repo, issue_number): + """Get the global ID for a given issue.""" + query = """ + query($owner: String!, $repo: String!, $issue_number: Int!) { + repository(owner: $owner, name: $repo) { + issue(number: $issue_number) { + id + } + } + } + """ + variables = { + 'owner': owner, + 'repo': repo, + 'issue_number': issue_number + } + project_request = requests.post('https://api.github.com/graphql', + json={'query': query, 'variables': variables}, + headers=self.graphql_headers) + if project_request.status_code == 200: + return project_request.json()['data']['repository']['issue']['id'] + return None + + def _add_issue_to_project(self, issue_id, project_id): + """Attempt to add this issue to a project.""" + mutation = """ + mutation($projectId: ID!, $contentId: ID!) 
{ + addProjectV2ItemById(input: {projectId: $projectId, contentId: $contentId}) { + item { + id + } + } + } + """ + variables = { + "projectId": project_id, + "contentId": issue_id + } + project_request = requests.post('https://api.github.com/graphql', + json={'query': mutation, 'variables': variables}, + headers=self.graphql_headers) + return project_request.status_code + + def _comment_issue(self, issue_number, comment): + """Post a comment on an issue.""" + issue_comment_url = f'{self.repos_url}{self.repo}/issues/{issue_number}/comments' + body = {'body': comment} + update_issue_request = requests.post(issue_comment_url, headers=self.issue_headers, json=body) + return update_issue_request.status_code + def create_issue(self, issue): """Create a dict containing the issue details and send it to GitHub.""" - title = issue.title - if len(title) > 80: - # Title is too long. - title = title[:80] + '...' formatted_issue_body = self.line_break.join(issue.body) - url_to_line = f'{self.base_url}{self.repo}/blob/{self.sha}/{issue.file_name}#L{issue.start_line}' + line_num_anchor = f'#L{issue.start_line}' + if issue.num_lines > 1: + line_num_anchor += f'-L{issue.start_line + issue.num_lines - 1}' + url_to_line = f'{self.line_base_url}{self.repo}/blob/{self.sha}/{issue.file_name}{line_num_anchor}' snippet = '```' + issue.markdown_language + '\n' + issue.hunk + '\n' + '```' issue_template = os.getenv('INPUT_ISSUE_TEMPLATE', None) @@ -130,13 +277,32 @@ class GitHubClient(object): issue_contents = formatted_issue_body + '\n\n' + url_to_line + '\n\n' + snippet else: issue_contents = url_to_line + '\n\n' + snippet - # Check if the current issue already exists - if so, skip it. - # The below is a simple and imperfect check based on the issue title. - for existing_issue in self.existing_issues: - if issue.title == existing_issue['title']: - print(f'Skipping issue (already exists).') - return + endpoint = self.issues_url + if issue.issue_url: + # Issue already exists, update existing rather than create new. + endpoint += f'/{issue.issue_number}' + + title = issue.title + + if issue.ref: + if issue.ref.startswith('@'): + # Ref = assignee. + issue.assignees.append(issue.ref.lstrip('@')) + elif issue.ref.startswith('!'): + # Ref = label. + issue.labels.append(issue.ref.lstrip('!')) + elif issue.ref.startswith('#'): + # Ref = issue number (indicating this is a comment on that issue). + issue_number = issue.ref.lstrip('#') + if issue_number.isdigit(): + # Create the comment now. + return self._comment_issue(issue_number, f'{issue.title}\n\n{issue_contents}'), None + else: + # Just prepend the ref to the title. + title = f'[{issue.ref}] {issue.title}' + + title = title + '...' if len(title) > 80 else title new_issue_body = {'title': title, 'body': issue_contents, 'labels': issue.labels} # We need to check if any assignees/milestone specified exist, otherwise issue creation will fail. @@ -153,128 +319,77 @@ class GitHubClient(object): new_issue_body['assignees'] = valid_assignees if issue.milestone: - milestone_url = f'{self.repos_url}{self.repo}/milestones/{issue.milestone}' - milestone_request = requests.get(url=milestone_url, headers=self.issue_headers) - if milestone_request.status_code == 200: - new_issue_body['milestone'] = issue.milestone + milestone_number = self._get_milestone(issue.milestone) + if milestone_number: + new_issue_body['milestone'] = milestone_number else: - print(f'Milestone {issue.milestone} does not exist! Dropping this parameter!') + print(f'Milestone {issue.milestone} could not be set. 
Dropping this milestone!') - new_issue_request = requests.post(url=self.issues_url, headers=self.issue_headers, - data=json.dumps(new_issue_body)) + if issue.issue_url: + # Update existing issue. + issue_request = requests.patch(url=endpoint, headers=self.issue_headers, json=new_issue_body) + else: + # Create new issue. + issue_request = requests.post(url=endpoint, headers=self.issue_headers, json=new_issue_body) - # Check if we should assign this issue to any projects. - if new_issue_request.status_code == 201 and (len(issue.user_projects) > 0 or len(issue.org_projects) > 0): - issue_json = new_issue_request.json() - issue_id = issue_json['id'] + request_status = issue_request.status_code + issue_number = issue_request.json()['number'] if request_status in [200, 201] else None - if len(issue.user_projects) > 0: - self.add_issue_to_projects(issue_id, issue.user_projects, 'user') - if len(issue.org_projects) > 0: - self.add_issue_to_projects(issue_id, issue.org_projects, 'org') + # Check if issue should be added to a project now it exists. + if issue_number and self.project: + project_id = self._get_project_id(self.project) + if project_id: + owner, repo = self.repo.split('/') + issue_id = self._get_issue_global_id(owner, repo, issue_number) + if issue_id: + self._add_issue_to_project(issue_id, project_id) - return new_issue_request.status_code + return request_status, issue_number def close_issue(self, issue): """Check to see if this issue can be found on GitHub and if so close it.""" - matched = 0 issue_number = None - for existing_issue in self.existing_issues: - # This is admittedly a simple check that may not work in complex scenarios, but we can't deal with them yet. - if existing_issue['title'] == issue.title: - matched += 1 - # If there are multiple issues with similar titles, don't try and close any. - if matched > 1: - print(f'Skipping issue (multiple matches)') - break - issue_number = existing_issue['number'] + if issue.issue_number: + # If URL insertion is enabled. + issue_number = issue.issue_number else: - # The titles match, so we will try and close the issue. - update_issue_url = f'{self.repos_url}{self.repo}/issues/{issue_number}' + # Try simple matching. + matched = 0 + for existing_issue in self.existing_issues: + if existing_issue['title'] == issue.title: + matched += 1 + # If there are multiple issues with similar titles, don't try and close any. + if matched > 1: + print(f'Skipping issue (multiple matches)') + break + issue_number = existing_issue['number'] + if issue_number: + update_issue_url = f'{self.issues_url}/{issue_number}' body = {'state': 'closed'} - requests.patch(update_issue_url, headers=self.issue_headers, data=json.dumps(body)) + requests.patch(update_issue_url, headers=self.issue_headers, json=body) + request_status = self._comment_issue(issue_number, f'Closed in {self.sha}.') - issue_comment_url = f'{self.repos_url}{self.repo}/issues/{issue_number}/comments' - body = {'body': f'Closed in {self.sha}'} - update_issue_request = requests.post(issue_comment_url, headers=self.issue_headers, - data=json.dumps(body)) - return update_issue_request.status_code + # Update the description if this is a PR. 
+ if os.getenv('GITHUB_EVENT_NAME') == 'pull_request': + pr_number = os.getenv('PR_NUMBER') + if pr_number: + request_status = self._update_pr_body(pr_number, body) + return request_status return None - def add_issue_to_projects(self, issue_id, projects, projects_type): - """Attempt to add this issue to the specified user or organisation projects.""" - projects_secret = os.getenv('INPUT_PROJECTS_SECRET', None) - if not projects_secret: - print('You need to create and set PROJECTS_SECRET to use projects') - return - projects_headers = { - 'Accept': 'application/vnd.github.inertia-preview+json', - 'Authorization': f'token {projects_secret}' - } - - # Loop through all the projects that we should assign this issue to. - for i, project in enumerate(projects): - print(f'Adding issue to {projects_type} project {i + 1} of {len(projects)}') - project = project.replace(' / ', '/') - try: - entity_name, project_name, column_name = project.split('/') - except ValueError: - print('Invalid project syntax') - continue - entity_name = entity_name.strip() - project_name = project_name.strip() - column_name = column_name.strip() - - if projects_type == 'user': - projects_url = f'{self.base_url}users/{entity_name}/projects' - elif projects_type == 'org': - projects_url = f'{self.base_url}orgs/{entity_name}/projects' - else: - return - - # We need to use the project name to get its ID. - projects_request = requests.get(url=projects_url, headers=projects_headers) - if projects_request.status_code == 200: - projects_json = projects_request.json() - for project_dict in projects_json: - if project_dict['name'].lower() == project_name.lower(): - project_id = project_dict['id'] - break - else: - print('Project does not exist, skipping') - continue - else: - print('An error occurred, skipping') - continue - - # Use the project ID and column name to get the column ID. - columns_url = f'{self.base_url}projects/{project_id}/columns' - columns_request = requests.get(url=columns_url, headers=projects_headers) - if columns_request.status_code == 200: - columns_json = columns_request.json() - for column_dict in columns_json: - if column_dict['name'].lower() == column_name.lower(): - column_id = column_dict['id'] - break - else: - print('Column does not exist, skipping') - continue - else: - print('An error occurred, skipping') - continue - - # Use the column ID to assign the issue to the project. 
- new_card_url = f'{self.base_url}projects/columns/{column_id}/cards' - new_card_body = { - 'content_id': int(issue_id), - 'content_type': 'Issue' - } - new_card_request = requests.post(url=new_card_url, headers=projects_headers, - data=json.dumps(new_card_body)) - if new_card_request.status_code == 201: - print('Issue card added to project') - else: - print('Issue card could not be added to project') + def _update_pr_body(self, pr_number, issue_number): + """Add a close message for an issue to a PR.""" + pr_url = f'{self.repos_url}{self.repo}/pulls/{pr_number}' + pr_request = requests.get(pr_url, headers=self.issue_headers) + if pr_request.status_code == 200: + pr_body = pr_request.json()['body'] + close_message = f'Closes #{issue_number}' + if close_message not in pr_body: + updated_pr_body = f'{pr_body}\n\n{close_message}' if pr_body.strip() else close_message + body = {'body': updated_pr_body} + pr_update_request = requests.patch(pr_url, headers=self.issue_headers, json=body) + return pr_update_request.status_code + return pr_request.status_code class TodoParser(object): @@ -288,13 +403,15 @@ class TodoParser(object): ADDITION_PATTERN = re.compile(r'(?<=^\+).*') DELETION_PATTERN = re.compile(r'(?<=^-).*') REF_PATTERN = re.compile(r'.+?(?=\))') - LABELS_PATTERN = re.compile(r'(?<=labels:\s).+') - ASSIGNEES_PATTERN = re.compile(r'(?<=assignees:\s).+') - MILESTONE_PATTERN = re.compile(r'(?<=milestone:\s).+') - USER_PROJECTS_PATTERN = re.compile(r'(?<=user projects:\s).+') - ORG_PROJECTS_PATTERN = re.compile(r'(?<=org projects:\s).+') + LABELS_PATTERN = re.compile(r'(?<=labels:\s).+', re.IGNORECASE) + ASSIGNEES_PATTERN = re.compile(r'(?<=assignees:\s).+', re.IGNORECASE) + MILESTONE_PATTERN = re.compile(r'(?<=milestone:\s).+', re.IGNORECASE) + ISSUE_URL_PATTERN = re.compile(r'(?<=Issue URL:\s).+', re.IGNORECASE) + ISSUE_NUMBER_PATTERN = re.compile(r'/issues/(\d+)', re.IGNORECASE) def __init__(self): + # Determine if the issues should be escaped. + self.should_escape = os.getenv('INPUT_ESCAPE', 'true') == 'true' # Load any custom identifiers, otherwise use the default. custom_identifiers = os.getenv('INPUT_IDENTIFIERS') self.identifiers = ['TODO'] @@ -303,7 +420,7 @@ class TodoParser(object): try: custom_identifiers_dict = json.loads(custom_identifiers) for identifier_dict in custom_identifiers_dict: - if type(identifier_dict['name']) != str or type(identifier_dict['labels']) != list: + if type(identifier_dict['name']) is not str or type(identifier_dict['labels']) is not list: raise TypeError self.identifiers = [identifier['name'] for identifier in custom_identifiers_dict] self.identifiers_dict = custom_identifiers_dict @@ -311,24 +428,84 @@ class TodoParser(object): print('Invalid identifiers dict, ignoring.') self.languages_dict = None + # Check if the standard collections should be loaded. + if os.getenv('INPUT_NO_STANDARD', 'false') != 'true': + # Load the languages data for ascertaining file types. + languages_url = 'https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml' + languages_request = requests.get(url=languages_url) + if languages_request.status_code == 200: + languages_data = languages_request.text + yaml = YAML(typ='safe') + self.languages_dict = yaml.load(languages_data) + else: + raise Exception('Cannot retrieve languages data. Operation will abort.') - # Load the languages data for ascertaining file types. 
- languages_url = 'https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml' - languages_request = requests.get(url=languages_url) - if languages_request.status_code == 200: - languages_data = languages_request.text - yaml = YAML(typ='safe') - self.languages_dict = yaml.load(languages_data) + # Load the comment syntax data for identifying comments. + syntax_url = 'https://raw.githubusercontent.com/alstr/todo-to-issue-action/master/syntax.json' + syntax_request = requests.get(url=syntax_url) + if syntax_request.status_code == 200: + self.syntax_dict = syntax_request.json() + else: + raise Exception('Cannot retrieve syntax data. Operation will abort.') else: - raise Exception('Cannot retrieve languages data. Operation will abort.') + self.syntax_dict = [] + self.languages_dict = {} - # Load the comment syntax data for identifying comments. - syntax_url = 'https://raw.githubusercontent.com/alstr/todo-to-issue-action/master/syntax.json' - syntax_request = requests.get(url=syntax_url) - if syntax_request.status_code == 200: - self.syntax_dict = syntax_request.json() - else: - raise Exception('Cannot retrieve syntax data. Operation will abort.') + custom_languages = os.getenv('INPUT_LANGUAGES', '') + if custom_languages != '': + # Load all custom languages. + for path in custom_languages.split(','): + # noinspection PyBroadException + try: + # Decide if the path is a url or local file. + if path.startswith('http'): + languages_request = requests.get(path) + if languages_request.status_code != 200: + print(f'Cannot retrieve custom language file "{path}".') + continue + data = languages_request.json() + else: + path = os.path.join(os.getcwd(), path) + if not os.path.exists(path) or not os.path.isfile(path): + print(f'Cannot retrieve custom language file "{path}".') + continue + f = open(path) + data = json.load(f) + + # Iterate through the definitions. + for lang in data: + # Add/replace the language definition. + self.languages_dict[lang['language']] = {} + self.languages_dict[lang['language']]['type'] = '' + self.languages_dict[lang['language']]['color'] = '' + self.languages_dict[lang['language']]['extensions'] = lang['extensions'] + self.languages_dict[lang['language']]['source'] = '' + self.languages_dict[lang['language']]['ace_mode'] = 'text' + self.languages_dict[lang['language']]['language_id'] = 0 + + # Check if comment syntax for the language name already exists. + counter = 0 + exists = False + for syntax in self.syntax_dict: + if syntax['language'] == lang['language']: + exists = True + break + + counter = counter + 1 + + if exists: + # When the syntax exists it will be popped out of the list. + self.syntax_dict.pop(counter) + + # And be replaced with the new syntax definition. 
+ self.syntax_dict.append({ + 'language': lang['language'], + 'markers': lang['markers'] + }) + except Exception: + print(f'An error occurred in the custom language file "{path}".') + print('Please check the file, or if it represents undefined behavior, ' + 'create an issue at https://github.com/alstr/todo-to-issue-action/issues.') # noinspection PyTypeChecker def parse(self, diff_file): @@ -366,12 +543,12 @@ class TodoParser(object): continue curr_markers, curr_markdown_language = self._get_file_details(curr_file) if not curr_markers or not curr_markdown_language: - print(f'Could not check {curr_file} for TODOs as this language is not yet supported by default.') + print(f'Could not check "{curr_file}" for TODOs as this language is not yet supported by default.') continue # Break this section down into individual changed code blocks. - line_numbers = re.finditer(self.LINE_NUMBERS_PATTERN, hunk) - for i, line_numbers in enumerate(line_numbers): + line_numbers_iterator = re.finditer(self.LINE_NUMBERS_PATTERN, hunk) + for i, line_numbers in enumerate(line_numbers_iterator): line_numbers_inner_search = re.search(self.LINE_NUMBERS_INNER_PATTERN, line_numbers.group(0)) line_numbers_str = line_numbers_inner_search.group(0).strip('@@ -') start_line = line_numbers_str.split(' ')[1].strip('+') @@ -391,6 +568,7 @@ class TodoParser(object): prev_index = len(code_blocks) - 1 # Set the end of the last code block based on the start of this one. if prev_block and prev_block['file'] == block['file']: + # noinspection PyTypedDict code_blocks[prev_index]['hunk_end'] = line_numbers.start() code_blocks[prev_index]['hunk'] = (prev_block['hunk'] [prev_block['hunk_start']:line_numbers.start()]) @@ -410,21 +588,48 @@ class TodoParser(object): for marker in block['markers']: # Check if there are line or block comments. if marker['type'] == 'line': - comment_pattern = r'(^[+\-\s].*' + marker['pattern'] + r'\s.+$)' + # Add a negative lookup to include the second character from alternative comment patterns. + # This step is essential to handle cases like in Julia, where '#' and '#=' are comment patterns. + # It ensures that when a space after the comment is optional ('\s' => '\s*'), + # the second character would be matched because of the any character expression ('.+'). + suff_escape_list = [] + pref_escape_list = [] + for to_escape in block['markers']: + if to_escape['type'] == 'line': + if to_escape['pattern'] == marker['pattern']: + continue + if marker['pattern'][0] == to_escape['pattern'][0]: + suff_escape_list.append(self._extract_character(to_escape['pattern'], 1)) + else: + # Block comments and line comments cannot have the same comment pattern, + # so a check if the string is the same is unnecessary. + if to_escape['pattern']['start'][0] == marker['pattern'][0]: + suff_escape_list.append(self._extract_character(to_escape['pattern']['start'], 1)) + search = to_escape['pattern']['end'].find(marker['pattern']) + if search != -1: + pref_escape_list.append(self._extract_character(to_escape['pattern']['end'], + search - 1)) + + comment_pattern = (r'(^.*' + + (r'(? 
<!(' + '|'.join(pref_escape_list) + r'))' if len(pref_escape_list) > 0
+ else '')
+ + marker['pattern']
+ + (r'(?!(' + '|'.join(suff_escape_list) + r'))' if len(suff_escape_list) > 0
+ else '')
+ + r'\s*.+$)')
comments = re.finditer(comment_pattern, block['hunk'], re.MULTILINE)
extracted_comments = []
prev_comment = None
for i, comment in enumerate(comments):
- if i == 0 or re.search('|'.join(self.identifiers), comment.group(0)):
- extracted_comments.append([comment])
+ if prev_comment and comment.start() == prev_comment.end() + 1:
+ extracted_comments[len(extracted_comments) - 1].append(comment)
else:
- if comment.start() == prev_comment.end() + 1:
- extracted_comments[len(extracted_comments) - 1].append(comment)
+ extracted_comments.append([comment])
prev_comment = comment
for comment in extracted_comments:
- issue = self._extract_issue_if_exists(comment, marker, block)
- if issue:
- issues.append(issue)
+ extracted_issues = self._extract_issue_if_exists(comment, marker, block)
+ if extracted_issues:
+ issues.extend(extracted_issues)
else:
comment_pattern = (r'(?:[+\-\s]\s*' + marker['pattern']['start'] +
r'.*?' + marker['pattern']['end'] + ')')
@@ -435,12 +640,10 @@ class TodoParser(object):
extracted_comments.append([comment])
for comment in extracted_comments:
- issue = self._extract_issue_if_exists(comment, marker, block)
- if issue:
- issues.append(issue)
+ extracted_issues = self._extract_issue_if_exists(comment, marker, block)
+ if extracted_issues:
+ issues.extend(extracted_issues)
- default_user_projects = os.getenv('INPUT_USER_PROJECTS', None)
- default_org_projects = os.getenv('INPUT_ORG_PROJECTS', None)
for i, issue in enumerate(issues):
# Strip some of the diff symbols so it can be included as a code snippet in the issue body.
# Strip removed lines.
@@ -451,95 +654,164 @@ class TodoParser(object):
cleaned_hunk = re.sub(r'\n\sNo newline at end of file', '', cleaned_hunk, 0, re.MULTILINE)
issue.hunk = cleaned_hunk
- # If no projects have been specified for this issue, assign any default projects that exist.
- if len(issue.user_projects) == 0 and default_user_projects is not None: - separated_user_projects = self._get_projects(f'user projects: {default_user_projects}', 'user') - issue.user_projects = separated_user_projects - if len(issue.org_projects) == 0 and default_org_projects is not None: - separated_org_projects = self._get_projects(f'org projects: {default_org_projects}', 'org') - issue.org_projects = separated_org_projects return issues + def _get_language_details(self, language_name, attribute, value): + """Try and get the Markdown language and comment syntax data based on a specified attribute of the language.""" + attributes = [at.lower() for at in self.languages_dict[language_name][attribute]] + if value.lower() in attributes: + for syntax_details in self.syntax_dict: + if syntax_details['language'] == language_name: + return syntax_details['markers'], self.languages_dict[language_name]['ace_mode'] + return None, None + def _get_file_details(self, file): """Try and get the Markdown language and comment syntax data for the given file.""" file_name, extension = os.path.splitext(os.path.basename(file)) for language_name in self.languages_dict: - if 'extensions' in self.languages_dict[language_name]: - language_extensions = [ex.lower() for ex in self.languages_dict[language_name]['extensions']] - if extension.lower() in language_extensions: - for syntax_details in self.syntax_dict: - if syntax_details['language'] == language_name: - return syntax_details['markers'], self.languages_dict[language_name]['ace_mode'] + # Check if the file extension matches the language's extensions. + if extension != '' and 'extensions' in self.languages_dict[language_name]: + syntax_details, ace_mode = self._get_language_details(language_name, 'extensions', extension) + if syntax_details is not None and ace_mode is not None: + return syntax_details, ace_mode + # Check if the file name matches the language's filenames. + if 'filenames' in self.languages_dict[language_name]: + syntax_details, ace_mode = self._get_language_details(language_name, 'filenames', file_name) + if syntax_details is not None and ace_mode is not None: + return syntax_details, ace_mode return None, None def _extract_issue_if_exists(self, comment, marker, code_block): """Check this comment for TODOs, and if found, build an Issue object.""" - issue = None + curr_issue = None + found_issues = [] + line_statuses = [] + prev_line_title = False for match in comment: - lines = match.group().split('\n') - for line in lines: + comment_lines = match.group().split('\n') + for line in comment_lines: line_status, committed_line = self._get_line_status(line) + line_statuses.append(line_status) cleaned_line = self._clean_line(committed_line, marker) line_title, ref, identifier = self._get_title(cleaned_line) if line_title: - if ref: - issue_title = f'[{ref}] {line_title}' - else: - issue_title = line_title - issue = Issue( - title=issue_title, - labels=['todo'], + if prev_line_title and line_status == line_statuses[-2]: + # This means that there is a separate one-line TODO directly above this one. + # We need to store the previous one. 
+ curr_issue.status = line_status + found_issues.append(curr_issue) + curr_issue = Issue( + title=line_title, + labels=[], assignees=[], milestone=None, - user_projects=[], - org_projects=[], body=[], hunk=code_block['hunk'], file_name=code_block['file'], start_line=code_block['start_line'], + num_lines=1, markdown_language=code_block['markdown_language'], status=line_status, - identifier=identifier + identifier=identifier, + ref=ref, + issue_url=None, + issue_number=None ) + prev_line_title = True # Calculate the file line number that this issue references. hunk_lines = re.finditer(self.LINE_PATTERN, code_block['hunk'], re.MULTILINE) start_line = code_block['start_line'] for i, hunk_line in enumerate(hunk_lines): if hunk_line.group(0) == line: - issue.start_line = start_line + curr_issue.start_line = start_line break if i != 0 and (hunk_line.group(0).startswith('+') or not hunk_line.group(0).startswith('-')): start_line += 1 - elif issue: - # Extract other issue information that may exist. + elif curr_issue: + # Extract other issue information that may exist below the title. line_labels = self._get_labels(cleaned_line) line_assignees = self._get_assignees(cleaned_line) line_milestone = self._get_milestone(cleaned_line) - user_projects = self._get_projects(cleaned_line, 'user') - org_projects = self._get_projects(cleaned_line, 'org') + line_url = self._get_issue_url(cleaned_line) if line_labels: - issue.labels.extend(line_labels) + curr_issue.labels.extend(line_labels) elif line_assignees: - issue.assignees.extend(line_assignees) - elif line_milestone and not issue.milestone: - issue.milestone = line_milestone - elif user_projects: - issue.user_projects.extend(user_projects) - elif org_projects: - issue.org_projects.extend(org_projects) - elif len(cleaned_line): - issue.body.append(cleaned_line) - - if issue is not None and issue.identifier is not None and self.identifiers_dict is not None: + curr_issue.assignees.extend(line_assignees) + elif line_milestone: + curr_issue.milestone = line_milestone + elif line_url: + curr_issue.issue_url = line_url + issue_number_search = self.ISSUE_NUMBER_PATTERN.search(line_url) + if issue_number_search: + curr_issue.issue_number = issue_number_search.group(1) + elif len(cleaned_line) and line_status != LineStatus.DELETED: + if self.should_escape: + curr_issue.body.append(self._escape_markdown(cleaned_line)) + else: + curr_issue.body.append(cleaned_line) + if not line.startswith('-'): + curr_issue.num_lines += 1 + if not line_title: + prev_line_title = False + if curr_issue is not None and curr_issue.identifier is not None and self.identifiers_dict is not None: for identifier_dict in self.identifiers_dict: - if identifier_dict['name'] == issue.identifier: + if identifier_dict['name'] == curr_issue.identifier: for label in identifier_dict['labels']: - if label not in issue.labels: - issue.labels.append(label) + if label not in curr_issue.labels: + curr_issue.labels.append(label) - return issue + if curr_issue is not None: + # If all the lines are unchanged, don't do anything. + if all(s == LineStatus.UNCHANGED for s in line_statuses): + return None + # If the title line hasn't changed, but the info below has, we need to mark it as an update (addition). 
+ if (curr_issue.status == LineStatus.UNCHANGED + and (LineStatus.ADDED in line_statuses or LineStatus.DELETED in line_statuses)): + curr_issue.status = LineStatus.ADDED + + found_issues.append(curr_issue) + + return found_issues + + @staticmethod + def _escape_markdown(comment): + # All basic characters according to: https://www.markdownguide.org/basic-syntax + must_escape = ['\\', '<', '>', '#', '`', '*', '_', '[', ']', '(', ')', '!', '+', '-', '.', '|', '{', '}', '~', + '='] + + escaped = '' + + # Linear Escape Algorithm, because the algorithm ends in an infinite loop when using the function 'replace', + # which tries to replace all backslashes with duplicate backslashes, i.e. also the already other escaped + # characters. + for c in comment: + if c in must_escape: + escaped += '\\' + c + else: + escaped += c + return escaped + + @staticmethod + def _extract_character(input_str, pos): + # Extracts a character from the input string at the specified position, + # considering escape sequences when applicable. + # Test cases + # print(_extract_character("/\\*", 1)) # Output: "\*" + # print(_extract_character("\\*", 0)) # Output: "\*" + # print(_extract_character("\\", 0)) # Output: "\\" + # print(_extract_character("w", 0)) # Output: "w" + # print(_extract_character("wa", 1)) # Output: "a" + # print(_extract_character("\\\\w", 1)) # Output: "\\" + if input_str[pos] == '\\': + if pos >= 1 and not input_str[pos - 1] == '\\' and len(input_str) > pos + 1: + return '\\' + input_str[pos + 1] + return '\\\\' + if pos >= 1: + if input_str[pos - 1] == '\\': + return '\\' + input_str[pos] + return input_str[pos] def _get_line_status(self, comment): """Return a Tuple indicating whether this is an addition/deletion/unchanged, plus the cleaned comment.""" @@ -576,13 +848,13 @@ class TodoParser(object): title_identifier = None for identifier in self.identifiers: title_identifier = identifier - title_pattern = re.compile(r'(?<=' + identifier + r'[\s:]).+') + title_pattern = re.compile(fr'(?<={identifier}[\s:]).+', re.IGNORECASE) title_search = title_pattern.search(comment, re.IGNORECASE) if title_search: - title = title_search.group(0).strip() + title = title_search.group(0).strip(': ') break else: - title_ref_pattern = re.compile(r'(?<=' + identifier + r'\().+') + title_ref_pattern = re.compile(fr'(?<={identifier}\().+', re.IGNORECASE) title_ref_search = title_ref_pattern.search(comment, re.IGNORECASE) if title_ref_search: title = title_ref_search.group(0).strip() @@ -593,6 +865,16 @@ class TodoParser(object): break return title, ref, title_identifier + def _get_issue_url(self, comment): + """Check the passed comment for a GitHub issue URL.""" + url_search = self.ISSUE_URL_PATTERN.search(comment, re.IGNORECASE) + url = None + if url_search: + url = url_search.group(0) + parsed_url = urlparse(url) + return url if all([parsed_url.scheme, parsed_url.netloc]) else None + return url + def _get_labels(self, comment): """Check the passed comment for issue labels.""" labels_search = self.LABELS_PATTERN.search(comment, re.IGNORECASE) @@ -617,24 +899,9 @@ class TodoParser(object): milestone = None if milestone_search: milestone = milestone_search.group(0) - if milestone.isdigit(): - milestone = int(milestone) return milestone - def _get_projects(self, comment, projects_type): - """Check the passed comment for projects to link the issue to.""" - projects = [] - if projects_type == 'user': - projects_search = self.USER_PROJECTS_PATTERN.search(comment, re.IGNORECASE) - elif projects_type == 'org': - projects_search = 
self.ORG_PROJECTS_PATTERN.search(comment, re.IGNORECASE) - else: - return projects - if projects_search: - projects = projects_search.group(0).replace(', ', ',') - projects = list(filter(None, projects.split(','))) - return projects - + # noinspection PyMethodMayBeStatic def _should_ignore(self, file): ignore_patterns = os.getenv('INPUT_IGNORE', None) if ignore_patterns: @@ -667,30 +934,79 @@ if __name__ == "__main__": # This is a simple, non-perfect check to filter out any TODOs that have just been moved. # It looks for items that appear in the diff as both an addition and deletion. # It is based on the assumption that TODOs will not have identical titles in identical files. + # That is about as good as we can do for TODOs without issue URLs. issues_to_process = [] for values, similar_issues in itertools.groupby(raw_issues, key=operator.attrgetter('title', 'file_name', 'markdown_language')): similar_issues = list(similar_issues) - if (len(similar_issues) == 2 and ((similar_issues[0].status == LineStatus.ADDED and - similar_issues[1].status == LineStatus.DELETED) or - (similar_issues[1].status == LineStatus.ADDED and - similar_issues[0].status == LineStatus.DELETED))): + if (len(similar_issues) == 2 and all(issue.issue_url is None for issue in similar_issues) + and ((similar_issues[0].status == LineStatus.ADDED + and similar_issues[1].status == LineStatus.DELETED) + or (similar_issues[1].status == LineStatus.ADDED + and similar_issues[0].status == LineStatus.DELETED))): print(f'Issue "{values[0]}" appears as both addition and deletion. ' f'Assuming this issue has been moved so skipping.') continue issues_to_process.extend(similar_issues) + + # If a TODO with an issue URL is updated, it may appear as both an addition and a deletion. + # We need to ignore the deletion so it doesn't update then immediately close the issue. + # First store TODOs based on their status. + todos_status = defaultdict(lambda: {'added': False, 'deleted': False}) + + # Populate the status dictionary based on the issue URL. + for raw_issue in issues_to_process: + if raw_issue.issue_url: # Ensuring we're dealing with TODOs that have an issue URL. + if raw_issue.status == LineStatus.ADDED: + todos_status[raw_issue.issue_url]['added'] = True + elif raw_issue.status == LineStatus.DELETED: + todos_status[raw_issue.issue_url]['deleted'] = True + + # Determine which issues are both added and deleted. + update_and_close_issues = set() + + for _issue_url, _status in todos_status.items(): + if _status['added'] and _status['deleted']: + update_and_close_issues.add(_issue_url) + + # Remove issues from issues_to_process if they are both to be updated and closed (i.e., ignore deletions). + issues_to_process = [issue for issue in issues_to_process if + not (issue.issue_url in update_and_close_issues and issue.status == LineStatus.DELETED)] + # Cycle through the Issue objects and create or close a corresponding GitHub issue for each. for j, raw_issue in enumerate(issues_to_process): print(f'Processing issue {j + 1} of {len(issues_to_process)}') if raw_issue.status == LineStatus.ADDED: - status_code = client.create_issue(raw_issue) + status_code, new_issue_number = client.create_issue(raw_issue) if status_code == 201: print('Issue created') + # Check to see if we should insert the issue URL back into the linked TODO. + # Don't insert URLs for comments. Comments do not get updated. 
+ if client.insert_issue_urls and not (raw_issue.ref and raw_issue.ref.startswith('#')): + line_number = raw_issue.start_line - 1 + with open(raw_issue.file_name, 'r') as issue_file: + file_lines = issue_file.readlines() + if line_number < len(file_lines): + # Duplicate the line to retain the comment syntax. + new_line = file_lines[line_number] + remove = fr'{raw_issue.identifier}.*{raw_issue.title}' + insert = f'Issue URL: {client.line_base_url}{client.repo}/issues/{new_issue_number}' + new_line = re.sub(remove, insert, new_line) + # Check if the URL line already exists, if so abort. + if line_number == len(file_lines) - 1 or file_lines[line_number + 1] != new_line: + file_lines.insert(line_number + 1, new_line) + with open(raw_issue.file_name, 'w') as issue_file: + issue_file.writelines(file_lines) + elif status_code == 200: + print('Issue updated') else: print('Issue could not be created') elif raw_issue.status == LineStatus.DELETED and os.getenv('INPUT_CLOSE_ISSUES', 'true') == 'true': + if raw_issue.ref and raw_issue.ref.startswith('#'): + print('Issue looks like a comment, will not attempt to close.') + continue status_code = client.close_issue(raw_issue) - if status_code == 201: + if status_code in [200, 201]: print('Issue closed') else: print('Issue could not be closed') diff --git a/requirements.txt b/requirements.txt index 1a0b17f..30ce159 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,15 +1,3 @@ -attrs==22.1.0 -certifi==2022.12.07 -charset-normalizer==2.0.7 -exceptiongroup==1.0.0 -idna==3.3 -iniconfig==1.1.1 -packaging==21.3 -pluggy==1.0.0 -pyparsing==3.0.9 -pytest==7.2.0 -requests==2.26.0 -ruamel.yaml==0.17.17 -ruamel.yaml.clib==0.2.6 -tomli==2.0.1 -urllib3==1.26.7 +requests==2.32.3 +ruamel.yaml==0.18.6 +pytest==8.3.3 \ No newline at end of file diff --git a/syntax.json b/syntax.json index f5509f2..0de8d02 100644 --- a/syntax.json +++ b/syntax.json @@ -242,6 +242,13 @@ "start": "/\\*", "end": "\\*/" } + }, + { + "type": "block", + "pattern": { + "start": "{/\\*", + "end": "\\*/}" + } } ] }, @@ -357,6 +364,22 @@ } ] }, + { + "language": "PureScript", + "markers": [ + { + "type": "line", + "pattern": "--" + }, + { + "type": "block", + "pattern": { + "start": "{-", + "end": "-}" + } + } + ] + }, { "language": "Haskell", "markers": [ @@ -429,6 +452,22 @@ } ] }, + { + "language": "MDX", + "markers": [ + { + "type": "block", + "pattern": { + "start": "{/\\*", + "end": "\\*/}" + } + }, + { + "type": "line", + "pattern": "- \\[ \\]" + } + ] + }, { "language": "RMarkdown", "markers": [ @@ -741,5 +780,62 @@ "pattern": "'" } ] + }, + { + "language": "Cuda", + "markers": [ + { + "type": "line", + "pattern": "//" + }, + { + "type": "block", + "pattern": { + "start": "/\\*", + "end": "\\*/" + } + } + ] + }, + { + "language": "Makefile", + "markers": [ + { + "type": "line", + "pattern": "#" + } + ] + }, + { + "language": "Liquid", + "markers": [ + { + "type": "line", + "pattern": "#" + }, + { + "type": "block", + "pattern": { + "start": "{% comment %}", + "end": "{% endcomment %}" + } + } + ] + }, + { + "language": "Agda", + "markers": [ + { + "type": "line", + "pattern": "--" + }, + { + "type": "block", + "pattern": { + "start": "{-", + "end": "-}" + } + } + ] } ] diff --git a/tests/custom_languages.json b/tests/custom_languages.json new file mode 100644 index 0000000..2ac5c19 --- /dev/null +++ b/tests/custom_languages.json @@ -0,0 +1,40 @@ +[ + { + "language": "ILS", + "extensions": [ + ".ils" + ], + "markers": [ + { + "type": "line", + "pattern": "//" + }, + { + "type": 
"block", + "pattern": { + "start": "/\\*", + "end": "\\*/" + } + } + ] + }, + { + "language": "Java", + "extensions": [ + ".java2" + ], + "markers": [ + { + "type": "line", + "pattern": "////" + }, + { + "type": "block", + "pattern": { + "start": "+=", + "end": "=+" + } + } + ] + } +] \ No newline at end of file diff --git a/tests/test_closed.diff b/tests/test_closed.diff index fe7d092..86eca5f 100644 --- a/tests/test_closed.diff +++ b/tests/test_closed.diff @@ -153,12 +153,17 @@ index 0000000..7cccc5b --- /dev/null +++ b/src/tests/example_file.jl @@ -0,0 +1,2 @@ -- # TODO: Hopefully this comment turns into an issue +- #TODO: Hopefully this comment turns into an issue +- # TODO: Hopefully this comment also turns into an issue - print("Hello World") - #= TODO: Multiline comments - also need to be turned into task, and hopefully - kept together as one. - =# +- #=TODO: Another copied multiline comment +- also need to be turned into task, and hopefully +- kept together as one. +- =# diff --git a/tests/defs.bzl b/tests/defs.bzl index 525e25d..ba4e68d 100644 --- a/tests/defs.bzl @@ -384,11 +389,106 @@ index 6b0c6cf..b37e70a 100644 -' TODO: remove feature to prevent legal issues -Public Sub Plagiarize() +Public Sub MakeOriginalIdea() -diff --git a/tests/example.sht b/tests/example.sht -index 6b0c6cf..b37e70a 100644 ---- a/tests/example.sht -+++ b/tests/example.sht -@@ -1,2 +1,1 @@ --' TODO: switch to tea --Public Sub MakeCoffee() -+Public Sub MakeTea() +diff --git a/tests/example.cu b/tests/example.cu +index 0000000..a6c6cb0 +--- /dev/null ++++ b/tests/example.cu +@@ -1,9 +1,5 @@ +-// TODO rename this function + __global__ void test() { +- /* +- TODO fill this with something useful +- */ + } +diff --git a/tests/Makefile b/tests/Makefile +index 2996176..7545ccf 100644 +--- a/tests/Makefile ++++ b/tests/Makefile +@@ -1,12 +1,9 @@ +-# TODO change name. + NAME = asm + +-# TODO source files must be explicitly named. + SRC = $(shell find mandatory/src -name "*.asm") + OBJ = $(patsubst src/%.asm, obj/%.o, $(SRC)) + .PHONY: all + all: $(NAME) + $(NAME): $(OBJ) +- # TODO create the directory. + $(AR) rc $@ $(OBJ) + +diff --git a/tests/example_file.liquid b/tests/example_file.liquid +index 0000000..7cccc5b 100644 +--- a/tests/example_file.liquid ++++ b/tests/example_file.liquid +@@ -1,6 +0,0 @@ +-{% comment %} TODO: remove loop logic {% endcomment %} + {% for i in (1..3) -%} +- # TODO: Do math here! +- # labels: help wanted + {{ i }} + {% endfor %} +-{% comment %} +-TODO: Render Liquid file + {% assign featured_product = all_products["product_handle"] %} + {% render "product", product: featured_product %} +-{% endcomment %} +diff --git a/tests/example_file.tsx b/tests/example_file.tsx +index 0000000..7cccc5b 100644 +--- a/tests/example_file.tsx ++++ b/tests/example_file.tsx +@@ -1,7 +1,3 @@ +-// TODO: rename component +-export default async function Component() { ++export default async function MyComponent() { +- /* TODO: Use state here +- labels: client */ ++ const [data, setData] = useState(""); + + return ( +
 <div>
+- {/*
+- TODO: use styled component library
+- */}
++ {data}
+ </div>
+ ); + } +diff --git a/src/Swarm/Game/example.purs b/src/Swarm/Game/example.purs +index 525e25d..ba4e68d 100644 +--- a/src/Swarm/Game/example.purs ++++ b/src/Swarm/Game/example.purs +@@ -1,14 +0,0 @@ +--- | Standard devices that are always installed. +--- +--- TODO: Remove standard devices +--- In the future, make a way to build these and just start the base +--- out with a large supply of each? +--- labels: redesign +-stdDeviceList = +- ["treads", "grabber", "solar panel", "scanner", "plasma cutter"] +- +-{- | Very complicated function. +- +-TODO: Create an issue for TODO +--} +-sum a b = a + b +diff --git a/src/Swarm/Game/example.agda b/src/Swarm/Game/example.agda +index 525e25d..ba4e68d 100644 +--- a/src/Swarm/Game/example.agda ++++ b/src/Swarm/Game/example.agda +@@ -1,14 +0,0 @@ +--- | Standard devices that are always installed. +--- +--- TODO: Remove standard devices +--- In the future, make a way to build these and just start the base +--- out with a large supply of each? +--- labels: redesign +-stdDeviceList = +- ["treads", "grabber", "solar panel", "scanner", "plasma cutter"] +- +-{- | Very complicated function. +- +-TODO: Create an issue for TODO +--} +-sum a b = a + b diff --git a/tests/test_escape.diff b/tests/test_escape.diff new file mode 100644 index 0000000..f94bdfb --- /dev/null +++ b/tests/test_escape.diff @@ -0,0 +1,19 @@ +diff --git a/tests/ExampleFile.java b/tests/ExampleFile.java +index d340f6a..29b54da 100644 +--- a/tests/ExampleFile.java ++++ b/tests/ExampleFile.java +@@ -0,0 +1,13 @@ ++package com.mydomain.myapp; ++ ++public class JavaTests { ++ // TODO: Some Java ++ // # Some title ++ // ++ ++ /* ++ TODO: Definitely some Java ++ # Another title ++ ++ */ ++} +\ No newline at end of file \ No newline at end of file diff --git a/tests/test_new.diff b/tests/test_new.diff index afed07c..1e2de06 100644 --- a/tests/test_new.diff +++ b/tests/test_new.diff @@ -201,6 +201,20 @@ index 0000000..525e25d + # This function should probably do something more interesting + # labels: help wanted + pass +diff --git a/tests/BUILD.bazel b/tests/BUILD.bazel +new file mode 100644 +index 0000000..525e25d +--- /dev/null ++++ b/tests/BUILD.bazel +@@ -0,0 +1,23 @@ ++def hello_world(): ++ # TODO: Come up with a more imaginative greeting ++ print('Hello world') ++ ++ # TODO: Do more stuff ++ # This function should probably do something more interesting ++ # labels: help wanted ++ pass diff --git a/tests/example_file.ahk b/src/tests/example_file.ahk new file mode 100644 index 0000000..7cccc5b @@ -424,10 +438,111 @@ index 6b0c6cf..b37e70a 100644 @@ -1,1 +1,2 @@ +' TODO: remove feature to prevent legal issues Public Sub Plagiarize() -diff --git a/tests/example.sht b/tests/example.sht -index 6b0c6cf..b37e70a 100644 ---- a/tests/example.sht -+++ b/tests/example.sht -@@ -1,1 +1,2 @@ -+' TODO: switch to tea - Public Sub MakeCoffee() +diff --git a/tests/example.cu b/tests/example.cu +new file mode 100644 +index 0000000..a6c6cb0 +--- /dev/null ++++ b/tests/example.cu +@@ -1,3 +1,11 @@ ++ ++// TODO rename this function ++__global__ void test() { ++ /* ++ TODO fill this with something useful ++ */ ++} ++ +diff --git a/tests/Makefile b/tests/Makefile +new file mode 100644 +index 0000000..2996176 +--- /dev/null ++++ b/tests/Makefile +@@ -0,0 +1,12 @@ ++# TODO change name. ++NAME = asm ++ ++# TODO source files must be explicitly named. ++SRC = $(shell find mandatory/src -name "*.asm") ++OBJ = $(patsubst src/%.asm, obj/%.o, $(SRC)) ++.PHONY: all ++all: $(NAME) ++$(NAME): $(OBJ) ++ # TODO create the directory. 
++ $(AR) rc $@ $(OBJ) ++ +diff --git a/tests/example_file.liquid b/tests/example_file.liquid +new file mode 100644 +index 0000000..7cccc5b +--- /dev/null ++++ b/tests/example_file.liquid +@@ -0,0 +1,11 @@ ++{% comment %} TODO: remove loop logic {% endcomment %} ++{% for i in (1..3) -%} ++ # TODO: Do math here! ++ # labels: help wanted ++ {{ i }} ++{% endfor %} ++{% comment %} ++TODO: Render Liquid file ++{% assign featured_product = all_products["product_handle"] %} ++{% render "product", product: featured_product %} ++{% endcomment %} +diff --git a/tests/example_file.tsx b/tests/example_file.tsx +new file mode 100644 +index 0000000..7cccc5b +--- /dev/null ++++ b/tests/example_file.tsx +@@ -0,0 +1,13 @@ ++// TODO: rename component ++export default async function Component() { ++ /* TODO: Use state here ++ labels: client */ ++ ++ return ( ++
 <div>
++ {/*
++ TODO: use styled component library
++ */}
++ </div>
++ ); ++} +diff --git a/src/Swarm/Game/example.purs b/src/Swarm/Game/example.purs +new file mode 100644 +index 0000000..0ce9b1a +--- /dev/null ++++ b/src/Swarm/Game/example.purs +@@ -0,0 +1,14 @@ ++-- | Standard devices that are always installed. ++-- ++-- TODO: Remove standard devices ++-- In the future, make a way to build these and just start the base ++-- out with a large supply of each? ++-- labels: redesign ++stdDeviceList = ++ ["treads", "grabber", "solar panel", "scanner", "plasma cutter"] ++ ++{- | Very complicated function. ++ ++TODO: Create an issue for TODO ++-} ++sum a b = a + b +diff --git a/src/Swarm/Game/example.agda b/src/Swarm/Game/example.agda +new file mode 100644 +index 0000000..0ce9b1a +--- /dev/null ++++ b/src/Swarm/Game/example.agda +@@ -0,0 +1,14 @@ ++-- | Standard devices that are always installed. ++-- ++-- TODO: Remove standard devices ++-- In the future, make a way to build these and just start the base ++-- out with a large supply of each? ++-- labels: redesign ++stdDeviceList = ++ ["treads", "grabber", "solar panel", "scanner", "plasma cutter"] ++ ++{- | Very complicated function. ++ ++TODO: Create an issue for TODO ++-} ++sum a b = a + b diff --git a/tests/test_todo_parser.py b/tests/test_todo_parser.py index d540477..b9d99f3 100644 --- a/tests/test_todo_parser.py +++ b/tests/test_todo_parser.py @@ -23,8 +23,8 @@ class NewIssueTests(unittest.TestCase): self.raw_issues = parser.parse(diff_file) def test_python_issues(self): - # Includes 2 tests for Starlark. - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 6) + # Includes 4 tests for Starlark. + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 8) def test_yaml_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'yaml'), 2) @@ -35,11 +35,9 @@ class NewIssueTests(unittest.TestCase): def test_java_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'java'), 2) - def test_json_with_comments_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'javascript'), 2) - - def test_json5_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'javascript'), 2) + def test_javascript_issues(self): + # Includes 1 test for JSON with Comments, 1 test for JSON5, 3 tests for TSX. + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'javascript'), 5) def test_ruby_issues(self): # Includes 2 tests for Crystal. 
@@ -58,7 +56,7 @@ class NewIssueTests(unittest.TestCase): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'julia'), 2) def test_starlark_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 6) + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 8) def test_autohotkey_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'autohotkey'), 1) @@ -75,6 +73,9 @@ class NewIssueTests(unittest.TestCase): def test_twig_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'twig'), 2) + def test_makefile_issues(self): + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'makefile'), 3) + def test_md_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'markdown'), 8) @@ -82,7 +83,7 @@ class NewIssueTests(unittest.TestCase): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'r'), 2) def test_haskell_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'haskell'), 2) + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'haskell'), 4) def test_clojure_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'clojure'), 2) @@ -93,6 +94,11 @@ class NewIssueTests(unittest.TestCase): def test_xaml_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'xml'), 2) + def test_c_cpp_like_issues(self): + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'c_cpp'), 2) + + def test_liquid_issues(self): + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'liquid'), 3) class ClosedIssueTests(unittest.TestCase): # Check for removed TODOs across the files specified. @@ -130,16 +136,14 @@ class ClosedIssueTests(unittest.TestCase): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'tex'), 2) def test_julia_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'julia'), 2) + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'julia'), 4) def test_starlark_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'python'), 5) - def test_json_with_comments_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'javascript'), 2) - - def test_json5_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'javascript'), 2) + def test_javascript_issues(self): + # Includes 1 test for JSON with Comments, 1 test for JSON5, 3 tests for TSX. 
+ self.assertEqual(count_issues_for_file_type(self.raw_issues, 'javascript'), 5) def test_autohotkey_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'autohotkey'), 1) @@ -156,6 +160,9 @@ class ClosedIssueTests(unittest.TestCase): def test_twig_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'twig'), 2) + def test_makefile_issues(self): + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'makefile'), 3) + def test_md_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'markdown'), 8) @@ -163,7 +170,7 @@ class ClosedIssueTests(unittest.TestCase): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'r'), 2) def test_haskell_issues(self): - self.assertEqual(count_issues_for_file_type(self.raw_issues, 'haskell'), 2) + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'haskell'), 4) def test_clojure_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'clojure'), 2) @@ -173,6 +180,13 @@ class ClosedIssueTests(unittest.TestCase): def test_xaml_issues(self): self.assertEqual(count_issues_for_file_type(self.raw_issues, 'xml'), 2) + + def test_c_cpp_like_issues(self): + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'c_cpp'), 2) + + def test_liquid_issues(self): + self.assertEqual(count_issues_for_file_type(self.raw_issues, 'liquid'), 3) + class IgnorePatternTests(unittest.TestCase): @@ -205,3 +219,71 @@ class IgnorePatternTests(unittest.TestCase): # Includes 2 tests for Crystal. self.assertEqual(count_issues_for_file_type(self.raw_issues, 'ruby'), 5) os.environ['INPUT_IGNORE'] = '' + + +class EscapeMarkdownTest(unittest.TestCase): + def test_simple_escape(self): + os.environ['INPUT_ESCAPE'] = 'true' + parser = TodoParser() + with open('syntax.json', 'r') as syntax_json: + parser.syntax_dict = json.load(syntax_json) + diff_file = open('tests/test_escape.diff', 'r') + + # I had no other idea to make these checks dynamic. 
+ self.raw_issues = parser.parse(diff_file) + self.assertEqual(len(self.raw_issues), 2) + + issue = self.raw_issues[0] + self.assertEqual(len(issue.body), 2) + self.assertEqual(issue.body[0], '\\# Some title') + self.assertEqual(issue.body[1], '\\') + + issue = self.raw_issues[1] + self.assertEqual(len(issue.body), 2) + self.assertEqual(issue.body[0], '\\# Another title') + self.assertEqual(issue.body[1], '\\') + + +class CustomLanguageTest(unittest.TestCase): + def test_custom_lang_load(self): + os.environ['INPUT_LANGUAGES'] = 'tests/custom_languages.json' + parser = TodoParser() + # Test if the custom language ILS is actually loaded into the system + self.assertIsNotNone(parser.languages_dict['ILS']) + self.assertEqual(self.count_syntax(parser, 'ILS'), 1) + + def test_custom_lang_not_dupplicate(self): + os.environ['INPUT_LANGUAGES'] = 'tests/custom_languages.json' + parser = TodoParser() + + # Test if a custom language can overwrite the rules of an existing one + self.assertEqual(self.count_syntax(parser, 'Java'), 1) + for syntax in parser.syntax_dict: + if syntax['language'] == 'Java': + self.assertEqual(len(syntax['markers']), 2) + self.assertEqual(syntax['markers'][0]['pattern'], "////") + self.assertEqual(syntax['markers'][1]['pattern']['start'], '+=') + self.assertEqual(syntax['markers'][1]['pattern']['end'], '=+') + break + + self.assertIsNotNone(parser.languages_dict['Java']) + self.assertEqual(len(parser.languages_dict['Java']['extensions']), 1) + self.assertEqual(parser.languages_dict['Java']['extensions'][0], ".java2") + + def test_url_load(self): + os.environ['INPUT_LANGUAGES'] = 'https://raw.githubusercontent.com/alstr/todo-to-issue-action/master/tests/custom_languages.json' + os.environ['INPUT_NO_STANDARD'] = 'true' + parser = TodoParser() + + self.assertEqual(len(parser.languages_dict), 2) + self.assertEqual(len(parser.syntax_dict), 2) + + @staticmethod + def count_syntax(parser: TodoParser, name: str): + counter = 0 + + for syntax in parser.syntax_dict: + if syntax['language'] == name: + counter = counter + 1 + + return counter