import os
import json
from enum import Enum

import requests


class GitHubClient(object):
    """Basic client for getting the last diff and managing issues."""
    # Kept for backward compatibility with any external reference to the
    # class attributes; per-instance copies are created in __init__ so that
    # repeated construction does not accumulate into these shared lists.
    existing_issues = []
    milestones = []

    def __init__(self):
        """Read all configuration from INPUT_* environment variables and
        pre-fetch the repo's open issues and milestones."""
        # Fix: use per-instance lists instead of mutating the shared
        # class-level defaults.
        self.existing_issues = []
        self.milestones = []
        self.github_url = os.getenv('INPUT_GITHUB_URL')
        self.base_url = f'{self.github_url}/'
        self.repos_url = f'{self.base_url}repos/'
        self.repo = os.getenv('INPUT_REPO')
        self.before = os.getenv('INPUT_BEFORE')
        self.sha = os.getenv('INPUT_SHA')
        self.commits = json.loads(os.getenv('INPUT_COMMITS')) or []
        self.diff_url = os.getenv('INPUT_DIFF_URL')
        self.token = os.getenv('INPUT_TOKEN')
        self.issues_url = f'{self.repos_url}{self.repo}/issues'
        self.milestones_url = f'{self.repos_url}{self.repo}/milestones'
        self.issue_headers = {
            'Content-Type': 'application/json',
            'Authorization': f'token {self.token}',
            'X-GitHub-Api-Version': '2022-11-28'
        }
        self.graphql_headers = {
            'Authorization': f'Bearer {os.getenv("INPUT_PROJECTS_SECRET", "")}',
            'Accept': 'application/vnd.github.v4+json'
        }
        auto_p = os.getenv('INPUT_AUTO_P', 'true') == 'true'
        self.line_break = '\n\n' if auto_p else '\n'
        self.auto_assign = os.getenv('INPUT_AUTO_ASSIGN', 'false') == 'true'
        self.actor = os.getenv('INPUT_ACTOR')
        self.insert_issue_urls = os.getenv('INPUT_INSERT_ISSUE_URLS', 'false') == 'true'
        if self.base_url == 'https://api.github.com/':
            self.line_base_url = 'https://github.com/'
        else:
            self.line_base_url = self.base_url
        self.project = os.getenv('INPUT_PROJECT', None)
        # Retrieve the existing repo issues now so we can easily check them later.
        self._get_existing_issues()
        # Populate milestones so we can perform a lookup if one is specified.
        self._get_milestones()

    def get_last_diff(self):
        """Get the last diff.

        Chooses the diff endpoint in priority order: an explicit diff URL
        (PR events), a compare against the 'before' SHA, a single commit,
        or a compare against the oldest pushed commit.
        """
        if self.diff_url:
            # Diff url was directly passed in config, likely due to this being a PR.
            diff_url = self.diff_url
        elif self.before != '0000000000000000000000000000000000000000':
            # There is a valid before SHA to compare with, or this is a release being created.
            diff_url = f'{self.repos_url}{self.repo}/compare/{self.before}...{self.sha}'
        elif len(self.commits) == 1:
            # There is only one commit.
            diff_url = f'{self.repos_url}{self.repo}/commits/{self.sha}'
        else:
            # There are several commits: compare with the oldest one.
            oldest = sorted(self.commits, key=self._get_timestamp)[0]['id']
            diff_url = f'{self.repos_url}{self.repo}/compare/{oldest}...{self.sha}'

        diff_headers = {
            'Accept': 'application/vnd.github.v3.diff',
            'Authorization': f'token {self.token}',
            'X-GitHub-Api-Version': '2022-11-28'
        }
        diff_request = requests.get(url=diff_url, headers=diff_headers)
        if diff_request.status_code == 200:
            return diff_request.text
        raise Exception('Could not retrieve diff. Operation will abort.')

    # noinspection PyMethodMayBeStatic
    def _get_timestamp(self, commit):
        """Get a commit timestamp."""
        return commit.get('timestamp')

    def _get_milestones(self, page=1):
        """Get all the milestones (recursing through paginated results)."""
        params = {
            'per_page': 100,
            'page': page,
            'state': 'open'
        }
        milestones_request = requests.get(self.milestones_url, headers=self.issue_headers, params=params)
        if milestones_request.status_code == 200:
            self.milestones.extend(milestones_request.json())
            links = milestones_request.links
            if 'next' in links:
                self._get_milestones(page + 1)

    def _get_milestone(self, title):
        """Get the milestone number for the one with this title (creating one if it doesn't exist)."""
        for m in self.milestones:
            if m['title'] == title:
                return m['number']
        else:
            return self._create_milestone(title)

    def _create_milestone(self, title):
        """Create a new milestone with this title; return its number or None on failure."""
        milestone_data = {
            'title': title
        }
        milestone_request = requests.post(self.milestones_url, headers=self.issue_headers, json=milestone_data)
        return milestone_request.json()['number'] if milestone_request.status_code == 201 else None

    def _get_existing_issues(self, page=1):
        """Populate the existing issues list (recursing through paginated results)."""
        params = {
            'per_page': 100,
            'page': page,
            'state': 'open'
        }
        list_issues_request = requests.get(self.issues_url, headers=self.issue_headers, params=params)
        if list_issues_request.status_code == 200:
            self.existing_issues.extend(list_issues_request.json())
            links = list_issues_request.links
            if 'next' in links:
                self._get_existing_issues(page + 1)

    def _get_project_id(self, project):
        """Get the GraphQL node ID for a ProjectV2.

        `project` is of the form 'user/<owner>/<name>' or
        'organization/<owner>/<name>'.
        """
        project_type, owner, project_name = project.split('/')
        if project_type == 'user':
            query = """
            query($owner: String!) {
                user(login: $owner) {
                    projectsV2(first: 10) {
                        nodes {
                            id
                            title
                        }
                    }
                }
            }
            """
        elif project_type == 'organization':
            query = """
            query($owner: String!) {
                organization(login: $owner) {
                    projectsV2(first: 10) {
                        nodes {
                            id
                            title
                        }
                    }
                }
            }
            """
        else:
            print("Invalid project type")
            return None

        variables = {
            'owner': owner,
        }
        project_request = requests.post('https://api.github.com/graphql',
                                        json={'query': query, 'variables': variables},
                                        headers=self.graphql_headers)
        if project_request.status_code == 200:
            projects = (project_request.json().get('data', {}).get(project_type, {}).get('projectsV2', {})
                        .get('nodes', []))
            for project in projects:
                if project['title'] == project_name:
                    return project['id']
        return None

    def _get_issue_global_id(self, owner, repo, issue_number):
        """Get the global (GraphQL node) ID for a given issue."""
        query = """
        query($owner: String!, $repo: String!, $issue_number: Int!) {
            repository(owner: $owner, name: $repo) {
                issue(number: $issue_number) {
                    id
                }
            }
        }
        """
        variables = {
            'owner': owner,
            'repo': repo,
            'issue_number': issue_number
        }
        project_request = requests.post('https://api.github.com/graphql',
                                        json={'query': query, 'variables': variables},
                                        headers=self.graphql_headers)
        if project_request.status_code == 200:
            return project_request.json()['data']['repository']['issue']['id']
        return None

    def _add_issue_to_project(self, issue_id, project_id):
        """Attempt to add this issue to a project; return the HTTP status code."""
        mutation = """
        mutation($projectId: ID!, $contentId: ID!) {
            addProjectV2ItemById(input: {projectId: $projectId, contentId: $contentId}) {
                item {
                    id
                }
            }
        }
        """
        variables = {
            "projectId": project_id,
            "contentId": issue_id
        }
        project_request = requests.post('https://api.github.com/graphql',
                                        json={'query': mutation, 'variables': variables},
                                        headers=self.graphql_headers)
        return project_request.status_code

    def _comment_issue(self, issue_number, comment):
        """Post a comment on an issue; return the HTTP status code."""
        issue_comment_url = f'{self.repos_url}{self.repo}/issues/{issue_number}/comments'
        body = {'body': comment}
        update_issue_request = requests.post(issue_comment_url, headers=self.issue_headers, json=body)
        return update_issue_request.status_code

    def create_issue(self, issue):
        """Create a dict containing the issue details and send it to GitHub.

        Returns a (status_code, issue_number) tuple; issue_number is None
        when creation failed or when the TODO was posted as a comment.
        """
        formatted_issue_body = self.line_break.join(issue.body)
        line_num_anchor = f'#L{issue.start_line}'
        if issue.num_lines > 1:
            line_num_anchor += f'-L{issue.start_line + issue.num_lines - 1}'
        url_to_line = f'{self.line_base_url}{self.repo}/blob/{self.sha}/{issue.file_name}{line_num_anchor}'
        snippet = '```' + issue.markdown_language + '\n' + issue.hunk + '\n' + '```'

        issue_template = os.getenv('INPUT_ISSUE_TEMPLATE', None)
        if issue_template:
            issue_contents = (issue_template.replace('{{ title }}', issue.title)
                              .replace('{{ body }}', formatted_issue_body)
                              .replace('{{ url }}', url_to_line)
                              .replace('{{ snippet }}', snippet)
                              )
        elif len(issue.body) != 0:
            issue_contents = formatted_issue_body + '\n\n' + url_to_line + '\n\n' + snippet
        else:
            issue_contents = url_to_line + '\n\n' + snippet

        endpoint = self.issues_url
        if issue.issue_url:
            # Issue already exists, update existing rather than create new.
            endpoint += f'/{issue.issue_number}'

        title = issue.title

        if issue.ref:
            if issue.ref.startswith('@'):
                # Ref = assignee.
                issue.assignees.append(issue.ref.lstrip('@'))
            elif issue.ref.startswith('!'):
                # Ref = label.
                issue.labels.append(issue.ref.lstrip('!'))
            elif issue.ref.startswith('#'):
                # Ref = issue number (indicating this is a comment on that issue).
                issue_number = issue.ref.lstrip('#')
                if issue_number.isdigit():
                    # Create the comment now.
                    return self._comment_issue(issue_number, f'{issue.title}\n\n{issue_contents}'), None
            else:
                # Just prepend the ref to the title.
                title = f'[{issue.ref}] {issue.title}'

        # Fix: the original appended '...' to over-long titles without
        # actually truncating them; truncate to 80 chars first.
        title = title[:80] + '...' if len(title) > 80 else title
        new_issue_body = {'title': title, 'body': issue_contents, 'labels': issue.labels}

        # We need to check if any assignees/milestone specified exist, otherwise issue creation will fail.
        valid_assignees = []
        if len(issue.assignees) == 0 and self.auto_assign:
            valid_assignees.append(self.actor)
        for assignee in issue.assignees:
            assignee_url = f'{self.repos_url}{self.repo}/assignees/{assignee}'
            assignee_request = requests.get(url=assignee_url, headers=self.issue_headers)
            if assignee_request.status_code == 204:
                valid_assignees.append(assignee)
            else:
                print(f'Assignee {assignee} does not exist! Dropping this assignee!')
        new_issue_body['assignees'] = valid_assignees

        if issue.milestone:
            milestone_number = self._get_milestone(issue.milestone)
            if milestone_number:
                new_issue_body['milestone'] = milestone_number
            else:
                print(f'Milestone {issue.milestone} could not be set. Dropping this milestone!')

        if issue.issue_url:
            # Update existing issue.
            issue_request = requests.patch(url=endpoint, headers=self.issue_headers, json=new_issue_body)
        else:
            # Create new issue.
            issue_request = requests.post(url=endpoint, headers=self.issue_headers, json=new_issue_body)

        request_status = issue_request.status_code
        issue_number = issue_request.json()['number'] if request_status in [200, 201] else None

        # Check if issue should be added to a project now it exists.
        if issue_number and self.project:
            project_id = self._get_project_id(self.project)
            if project_id:
                owner, repo = self.repo.split('/')
                issue_id = self._get_issue_global_id(owner, repo, issue_number)
                if issue_id:
                    self._add_issue_to_project(issue_id, project_id)

        return request_status, issue_number

    def close_issue(self, issue):
        """Check to see if this issue can be found on GitHub and if so close it."""
        issue_number = None
        if issue.issue_number:
            # If URL insertion is enabled.
            issue_number = issue.issue_number
        else:
            # Try simple matching.
            matched = 0
            for existing_issue in self.existing_issues:
                if existing_issue['title'] == issue.title:
                    matched += 1
                    # If there are multiple issues with similar titles, don't try and close any.
                    if matched > 1:
                        print('Skipping issue (multiple matches)')
                        # Fix: reset the number, otherwise the first match
                        # would still be closed despite the ambiguity.
                        issue_number = None
                        break
                    issue_number = existing_issue['number']
        if issue_number:
            update_issue_url = f'{self.issues_url}/{issue_number}'
            body = {'state': 'closed'}
            requests.patch(update_issue_url, headers=self.issue_headers, json=body)
            request_status = self._comment_issue(issue_number, f'Closed in {self.sha}.')

            # Update the description if this is a PR.
            if os.getenv('GITHUB_EVENT_NAME') == 'pull_request':
                pr_number = os.getenv('PR_NUMBER')
                if pr_number:
                    # Fix: the original passed `body` (the close payload
                    # dict) where the issue number is expected, producing a
                    # PR message like "Closes #{'state': 'closed'}".
                    request_status = self._update_pr_body(pr_number, issue_number)
            return request_status
        return None

    def _update_pr_body(self, pr_number, issue_number):
        """Add a close message for an issue to a PR."""
        pr_url = f'{self.repos_url}{self.repo}/pulls/{pr_number}'
        pr_request = requests.get(pr_url, headers=self.issue_headers)
        if pr_request.status_code == 200:
            pr_body = pr_request.json()['body']
            close_message = f'Closes #{issue_number}'
            if close_message not in pr_body:
                updated_pr_body = f'{pr_body}\n\n{close_message}' if pr_body.strip() else close_message
                body = {'body': updated_pr_body}
                pr_update_request = requests.patch(pr_url, headers=self.issue_headers, json=body)
                return pr_update_request.status_code
        return pr_request.status_code


class Issue(object):
    """Basic Issue model for collecting the necessary info to send to GitHub."""

    def __init__(self, title, labels, assignees, milestone, body, hunk, file_name,
                 start_line, num_lines, markdown_language, status, identifier, ref, issue_url, issue_number):
        self.title = title
        self.labels = labels
        self.assignees = assignees
        self.milestone = milestone
        self.body = body
        self.hunk = hunk
        self.file_name = file_name
        self.start_line = start_line
        self.num_lines = num_lines
        self.markdown_language = markdown_language
        self.status = status
        self.identifier = identifier
        self.ref = ref
        self.issue_url = issue_url
        self.issue_number = issue_number


class LineStatus(Enum):
    """Represents the status of a line in a diff file."""
    ADDED = 0
    DELETED = 1
    UNCHANGED = 2
b/TodoParser.py new file mode 100644 index 0000000..ffcf242 --- /dev/null +++ b/TodoParser.py @@ -0,0 +1,528 @@ +import os +import re +import requests +from ruamel.yaml import YAML +from LineStatus import * +from Issue import * +import requests +import json +from urllib.parse import urlparse + +class TodoParser(object): + """Parser for extracting information from a given diff file.""" + FILE_HUNK_PATTERN = r'(?<=diff)(.*?)(?=diff\s--git\s)' + HEADER_PATTERN = r'(?<=--git).*?(?=$\n(index|new|deleted))' + LINE_PATTERN = r'^.*$' + FILENAME_PATTERN = re.compile(r'(?<=a/).+?(?=\sb/)') + LINE_NUMBERS_PATTERN = re.compile(r'@@[\d\s,\-+]*\s@@.*') + LINE_NUMBERS_INNER_PATTERN = re.compile(r'@@[\d\s,\-+]*\s@@') + ADDITION_PATTERN = re.compile(r'(?<=^\+).*') + DELETION_PATTERN = re.compile(r'(?<=^-).*') + REF_PATTERN = re.compile(r'.+?(?=\))') + LABELS_PATTERN = re.compile(r'(?<=labels:\s).+', re.IGNORECASE) + ASSIGNEES_PATTERN = re.compile(r'(?<=assignees:\s).+', re.IGNORECASE) + MILESTONE_PATTERN = re.compile(r'(?<=milestone:\s).+', re.IGNORECASE) + ISSUE_URL_PATTERN = re.compile(r'(?<=Issue URL:\s).+', re.IGNORECASE) + ISSUE_NUMBER_PATTERN = re.compile(r'/issues/(\d+)', re.IGNORECASE) + + def __init__(self): + # Determine if the issues should be escaped. + self.should_escape = os.getenv('INPUT_ESCAPE', 'true') == 'true' + # Load any custom identifiers, otherwise use the default. 
+ custom_identifiers = os.getenv('INPUT_IDENTIFIERS') + self.identifiers = ['TODO'] + self.identifiers_dict = None + if custom_identifiers: + try: + custom_identifiers_dict = json.loads(custom_identifiers) + for identifier_dict in custom_identifiers_dict: + if type(identifier_dict['name']) is not str or type(identifier_dict['labels']) is not list: + raise TypeError + self.identifiers = [identifier['name'] for identifier in custom_identifiers_dict] + self.identifiers_dict = custom_identifiers_dict + except (json.JSONDecodeError, KeyError, TypeError): + print('Invalid identifiers dict, ignoring.') + + self.languages_dict = None + # Check if the standard collections should be loaded. + if os.getenv('INPUT_NO_STANDARD', 'false') != 'true': + # Load the languages data for ascertaining file types. + languages_url = 'https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml' + languages_request = requests.get(url=languages_url) + if languages_request.status_code == 200: + languages_data = languages_request.text + yaml = YAML(typ='safe') + self.languages_dict = yaml.load(languages_data) + else: + raise Exception('Cannot retrieve languages data. Operation will abort.') + + # Load the comment syntax data for identifying comments. + syntax_url = 'https://raw.githubusercontent.com/alstr/todo-to-issue-action/master/syntax.json' + syntax_request = requests.get(url=syntax_url) + if syntax_request.status_code == 200: + self.syntax_dict = syntax_request.json() + else: + raise Exception('Cannot retrieve syntax data. Operation will abort.') + else: + self.syntax_dict = [] + self.languages_dict = {} + + custom_languages = os.getenv('INPUT_LANGUAGES', '') + if custom_languages != '': + # Load all custom languages. + for path in custom_languages.split(','): + # noinspection PyBroadException + try: + # Decide if the path is a url or local file. 
+ if path.startswith('http'): + languages_request = requests.get(path) + if languages_request.status_code != 200: + print(f'Cannot retrieve custom language file "{path}".') + continue + data = languages_request.json() + else: + path = os.path.join(os.getcwd(), path) + if not os.path.exists(path) or not os.path.isfile(path): + print(f'Cannot retrieve custom language file "{path}".') + continue + with open(path) as f: + data = json.load(f) + + # Iterate through the definitions. + for lang in data: + # Add/replace the language definition. + self.languages_dict[lang['language']] = {} + self.languages_dict[lang['language']]['type'] = '' + self.languages_dict[lang['language']]['color'] = '' + self.languages_dict[lang['language']]['extensions'] = lang['extensions'] + self.languages_dict[lang['language']]['source'] = '' + self.languages_dict[lang['language']]['ace_mode'] = 'text' + self.languages_dict[lang['language']]['language_id'] = 0 + + # Check if comment syntax for the language name already exists. + counter = 0 + exists = False + for syntax in self.syntax_dict: + if syntax['language'] == lang['language']: + exists = True + break + + counter = counter + 1 + + if exists: + # When the syntax exists it will be popped out of the list. + self.syntax_dict.pop(counter) + + # And be replaced with the new syntax definition. + self.syntax_dict.append({ + 'language': lang['language'], + 'markers': lang['markers'] + }) + except Exception: + print(f'An error occurred in the custom language file "{path}".') + print('Please check the file, or if it represents undefined behavior, ' + 'create an issue at https://github.com/alstr/todo-to-issue-action/issues.') + + # noinspection PyTypeChecker + def parse(self, diff_file): + issues = [] + + # The parser works by gradually breaking the diff file down into smaller and smaller segments. + # At each level relevant information is extracted. + + # First separate the diff into sections for each changed file. 
+ file_hunks = re.finditer(self.FILE_HUNK_PATTERN, diff_file.read(), re.DOTALL) + last_end = None + extracted_file_hunks = [] + for i, file_hunk in enumerate(file_hunks): + extracted_file_hunks.append(file_hunk.group(0)) + last_end = file_hunk.end() + diff_file.seek(0) + extracted_file_hunks.append(diff_file.read()[last_end:]) + diff_file.close() + + code_blocks = [] + prev_block = None + # Iterate through each section extracted above. + for hunk in extracted_file_hunks: + # Extract the file information so we can figure out the Markdown language and comment syntax. + header_search = re.search(self.HEADER_PATTERN, hunk, re.MULTILINE) + if not header_search: + continue + files = header_search.group(0) + + filename_search = re.search(self.FILENAME_PATTERN, files) + if not filename_search: + continue + curr_file = filename_search.group(0) + if self._should_ignore(curr_file): + continue + curr_markers, curr_markdown_language = self._get_file_details(curr_file) + if not curr_markers or not curr_markdown_language: + print(f'Could not check "{curr_file}" for TODOs as this language is not yet supported by default.') + continue + + # Break this section down into individual changed code blocks. + line_numbers_iterator = re.finditer(self.LINE_NUMBERS_PATTERN, hunk) + for i, line_numbers in enumerate(line_numbers_iterator): + line_numbers_inner_search = re.search(self.LINE_NUMBERS_INNER_PATTERN, line_numbers.group(0)) + line_numbers_str = line_numbers_inner_search.group(0).strip('@@ -') + start_line = line_numbers_str.split(' ')[1].strip('+') + start_line = int(start_line.split(',')[0]) + + # Put this information into a temporary dict for simplicity. + block = { + 'file': curr_file, + 'markers': curr_markers, + 'markdown_language': curr_markdown_language, + 'start_line': start_line, + 'hunk': hunk, + 'hunk_start': line_numbers.end(), + 'hunk_end': None + } + + prev_index = len(code_blocks) - 1 + # Set the end of the last code block based on the start of this one. 
+ if prev_block and prev_block['file'] == block['file']: + # noinspection PyTypedDict + code_blocks[prev_index]['hunk_end'] = line_numbers.start() + code_blocks[prev_index]['hunk'] = (prev_block['hunk'] + [prev_block['hunk_start']:line_numbers.start()]) + elif prev_block: + code_blocks[prev_index]['hunk'] = prev_block['hunk'][prev_block['hunk_start']:] + + code_blocks.append(block) + prev_block = block + + if len(code_blocks) > 0: + last_index = len(code_blocks) - 1 + last_block = code_blocks[last_index] + code_blocks[last_index]['hunk'] = last_block['hunk'][last_block['hunk_start']:] + + # Now for each code block, check for comments, then those comments for TODOs. + for block in code_blocks: + for marker in block['markers']: + # Check if there are line or block comments. + if marker['type'] == 'line': + # Add a negative lookup to include the second character from alternative comment patterns. + # This step is essential to handle cases like in Julia, where '#' and '#=' are comment patterns. + # It ensures that when a space after the comment is optional ('\s' => '\s*'), + # the second character would be matched because of the any character expression ('.+'). + suff_escape_list = [] + pref_escape_list = [] + for to_escape in block['markers']: + if to_escape['type'] == 'line': + if to_escape['pattern'] == marker['pattern']: + continue + if marker['pattern'][0] == to_escape['pattern'][0]: + suff_escape_list.append(self._extract_character(to_escape['pattern'], 1)) + else: + # Block comments and line comments cannot have the same comment pattern, + # so a check if the string is the same is unnecessary. + if to_escape['pattern']['start'][0] == marker['pattern'][0]: + suff_escape_list.append(self._extract_character(to_escape['pattern']['start'], 1)) + search = to_escape['pattern']['end'].find(marker['pattern']) + if search != -1: + pref_escape_list.append(self._extract_character(to_escape['pattern']['end'], + search - 1)) + + comment_pattern = (r'(^.*' + + (r'(? 
0 + else '') + + marker['pattern'] + + (r'(?!(' + '|'.join(suff_escape_list) + r'))' if len(suff_escape_list) > 0 + else '') + + r'\s*.+$)') + comments = re.finditer(comment_pattern, block['hunk'], re.MULTILINE) + extracted_comments = [] + prev_comment = None + for i, comment in enumerate(comments): + if prev_comment and comment.start() == prev_comment.end() + 1: + extracted_comments[len(extracted_comments) - 1].append(comment) + else: + extracted_comments.append([comment]) + prev_comment = comment + for comment in extracted_comments: + extracted_issues = self._extract_issue_if_exists(comment, marker, block) + if extracted_issues: + issues.extend(extracted_issues) + else: + comment_pattern = (r'(?:[+\-\s]\s*' + marker['pattern']['start'] + r'.*?' + + marker['pattern']['end'] + ')') + comments = re.finditer(comment_pattern, block['hunk'], re.DOTALL) + extracted_comments = [] + for i, comment in enumerate(comments): + if re.search('|'.join(self.identifiers), comment.group(0)): + extracted_comments.append([comment]) + + for comment in extracted_comments: + extracted_issues = self._extract_issue_if_exists(comment, marker, block) + if extracted_issues: + issues.extend(extracted_issues) + + for i, issue in enumerate(issues): + # Strip some of the diff symbols so it can be included as a code snippet in the issue body. + # Strip removed lines. + cleaned_hunk = re.sub(r'\n^-.*$', '', issue.hunk, 0, re.MULTILINE) + # Strip leading symbols/whitespace. + cleaned_hunk = re.sub(r'^.', '', cleaned_hunk, 0, re.MULTILINE) + # Strip newline message. 
+ cleaned_hunk = re.sub(r'\n\sNo newline at end of file', '', cleaned_hunk, 0, re.MULTILINE) + issue.hunk = cleaned_hunk + + return issues + + def _get_language_details(self, language_name, attribute, value): + """Try and get the Markdown language and comment syntax data based on a specified attribute of the language.""" + attributes = [at.lower() for at in self.languages_dict[language_name][attribute]] + if value.lower() in attributes: + for syntax_details in self.syntax_dict: + if syntax_details['language'] == language_name: + return syntax_details['markers'], self.languages_dict[language_name]['ace_mode'] + return None, None + + def _get_file_details(self, file): + """Try and get the Markdown language and comment syntax data for the given file.""" + file_name, extension = os.path.splitext(os.path.basename(file)) + for language_name in self.languages_dict: + # Check if the file extension matches the language's extensions. + if extension != '' and 'extensions' in self.languages_dict[language_name]: + syntax_details, ace_mode = self._get_language_details(language_name, 'extensions', extension) + if syntax_details is not None and ace_mode is not None: + return syntax_details, ace_mode + # Check if the file name matches the language's filenames. 
+ if 'filenames' in self.languages_dict[language_name]: + syntax_details, ace_mode = self._get_language_details(language_name, 'filenames', file_name) + if syntax_details is not None and ace_mode is not None: + return syntax_details, ace_mode + return None, None + + def _extract_issue_if_exists(self, comment, marker, code_block): + """Check this comment for TODOs, and if found, build an Issue object.""" + curr_issue = None + found_issues = [] + line_statuses = [] + prev_line_title = False + for match in comment: + comment_lines = match.group().split('\n') + for line in comment_lines: + line_status, committed_line = self._get_line_status(line) + line_statuses.append(line_status) + cleaned_line = self._clean_line(committed_line, marker) + line_title, ref, identifier = self._get_title(cleaned_line) + if line_title: + if prev_line_title and line_status == line_statuses[-2]: + # This means that there is a separate one-line TODO directly above this one. + # We need to store the previous one. + curr_issue.status = line_status + found_issues.append(curr_issue) + curr_issue = Issue( + title=line_title, + labels=[], + assignees=[], + milestone=None, + body=[], + hunk=code_block['hunk'], + file_name=code_block['file'], + start_line=code_block['start_line'], + num_lines=1, + markdown_language=code_block['markdown_language'], + status=line_status, + identifier=identifier, + ref=ref, + issue_url=None, + issue_number=None + ) + prev_line_title = True + + # Calculate the file line number that this issue references. + hunk_lines = re.finditer(self.LINE_PATTERN, code_block['hunk'], re.MULTILINE) + start_line = code_block['start_line'] + for i, hunk_line in enumerate(hunk_lines): + if hunk_line.group(0) == line: + curr_issue.start_line = start_line + break + if i != 0 and (hunk_line.group(0).startswith('+') or not hunk_line.group(0).startswith('-')): + start_line += 1 + + elif curr_issue: + # Extract other issue information that may exist below the title. 
+ line_labels = self._get_labels(cleaned_line) + line_assignees = self._get_assignees(cleaned_line) + line_milestone = self._get_milestone(cleaned_line) + line_url = self._get_issue_url(cleaned_line) + if line_labels: + curr_issue.labels.extend(line_labels) + elif line_assignees: + curr_issue.assignees.extend(line_assignees) + elif line_milestone: + curr_issue.milestone = line_milestone + elif line_url: + curr_issue.issue_url = line_url + issue_number_search = self.ISSUE_NUMBER_PATTERN.search(line_url) + if issue_number_search: + curr_issue.issue_number = issue_number_search.group(1) + elif len(cleaned_line) and line_status != LineStatus.DELETED: + if self.should_escape: + curr_issue.body.append(self._escape_markdown(cleaned_line)) + else: + curr_issue.body.append(cleaned_line) + if not line.startswith('-'): + curr_issue.num_lines += 1 + if not line_title: + prev_line_title = False + if curr_issue is not None and curr_issue.identifier is not None and self.identifiers_dict is not None: + for identifier_dict in self.identifiers_dict: + if identifier_dict['name'] == curr_issue.identifier: + for label in identifier_dict['labels']: + if label not in curr_issue.labels: + curr_issue.labels.append(label) + + if curr_issue is not None: + # If all the lines are unchanged, don't do anything. + if all(s == LineStatus.UNCHANGED for s in line_statuses): + return None + # If the title line hasn't changed, but the info below has, we need to mark it as an update (addition). 
+ if (curr_issue.status == LineStatus.UNCHANGED + and (LineStatus.ADDED in line_statuses or LineStatus.DELETED in line_statuses)): + curr_issue.status = LineStatus.ADDED + + found_issues.append(curr_issue) + + return found_issues + + @staticmethod + def _escape_markdown(comment): + # All basic characters according to: https://www.markdownguide.org/basic-syntax + must_escape = ['\\', '<', '>', '#', '`', '*', '_', '[', ']', '(', ')', '!', '+', '-', '.', '|', '{', '}', '~', + '='] + + escaped = '' + + # Linear Escape Algorithm, because the algorithm ends in an infinite loop when using the function 'replace', + # which tries to replace all backslashes with duplicate backslashes, i.e. also the already other escaped + # characters. + for c in comment: + if c in must_escape: + escaped += '\\' + c + else: + escaped += c + return escaped + + @staticmethod + def _extract_character(input_str, pos): + # Extracts a character from the input string at the specified position, + # considering escape sequences when applicable. 
+ # Test cases + # print(_extract_character("/\\*", 1)) # Output: "\*" + # print(_extract_character("\\*", 0)) # Output: "\*" + # print(_extract_character("\\", 0)) # Output: "\\" + # print(_extract_character("w", 0)) # Output: "w" + # print(_extract_character("wa", 1)) # Output: "a" + # print(_extract_character("\\\\w", 1)) # Output: "\\" + if input_str[pos] == '\\': + if pos >= 1 and not input_str[pos - 1] == '\\' and len(input_str) > pos + 1: + return '\\' + input_str[pos + 1] + return '\\\\' + if pos >= 1: + if input_str[pos - 1] == '\\': + return '\\' + input_str[pos] + return input_str[pos] + + def _get_line_status(self, comment): + """Return a Tuple indicating whether this is an addition/deletion/unchanged, plus the cleaned comment.""" + addition_search = self.ADDITION_PATTERN.search(comment) + if addition_search: + return LineStatus.ADDED, addition_search.group(0) + else: + deletion_search = self.DELETION_PATTERN.search(comment) + if deletion_search: + return LineStatus.DELETED, deletion_search.group(0) + return LineStatus.UNCHANGED, comment[1:] + + @staticmethod + def _clean_line(comment, marker): + """Remove unwanted symbols and whitespace.""" + comment = comment.strip() + if marker['type'] == 'block': + start_pattern = r'^' + marker['pattern']['start'] + end_pattern = marker['pattern']['end'] + r'$' + comment = re.sub(start_pattern, '', comment) + comment = re.sub(end_pattern, '', comment) + # Some block comments might have an asterisk on each line. 
+ if '*' in start_pattern and comment.startswith('*'): + comment = comment.lstrip('*') + else: + pattern = r'^' + marker['pattern'] + comment = re.sub(pattern, '', comment) + return comment.strip() + + def _get_title(self, comment): + """Check the passed comment for a new issue title (and reference, if specified).""" + title = None + ref = None + title_identifier = None + for identifier in self.identifiers: + title_identifier = identifier + title_pattern = re.compile(fr'(?<={identifier}[\s:]).+', re.IGNORECASE) + title_search = title_pattern.search(comment, re.IGNORECASE) + if title_search: + title = title_search.group(0).strip(': ') + break + else: + title_ref_pattern = re.compile(fr'(?<={identifier}\().+', re.IGNORECASE) + title_ref_search = title_ref_pattern.search(comment, re.IGNORECASE) + if title_ref_search: + title = title_ref_search.group(0).strip() + ref_search = self.REF_PATTERN.search(title) + if ref_search: + ref = ref_search.group(0) + title = title.replace(ref, '', 1).lstrip(':) ') + break + return title, ref, title_identifier + + def _get_issue_url(self, comment): + """Check the passed comment for a GitHub issue URL.""" + url_search = self.ISSUE_URL_PATTERN.search(comment, re.IGNORECASE) + url = None + if url_search: + url = url_search.group(0) + parsed_url = urlparse(url) + return url if all([parsed_url.scheme, parsed_url.netloc]) else None + return url + + def _get_labels(self, comment): + """Check the passed comment for issue labels.""" + labels_search = self.LABELS_PATTERN.search(comment, re.IGNORECASE) + labels = [] + if labels_search: + labels = labels_search.group(0).replace(', ', ',') + labels = list(filter(None, labels.split(','))) + return labels + + def _get_assignees(self, comment): + """Check the passed comment for issue assignees.""" + assignees_search = self.ASSIGNEES_PATTERN.search(comment, re.IGNORECASE) + assignees = [] + if assignees_search: + assignees = assignees_search.group(0).replace(', ', ',') + assignees = list(filter(None, 
assignees.split(','))) + return assignees + + def _get_milestone(self, comment): + """Check the passed comment for a milestone.""" + milestone_search = self.MILESTONE_PATTERN.search(comment, re.IGNORECASE) + milestone = None + if milestone_search: + milestone = milestone_search.group(0) + return milestone + + # noinspection PyMethodMayBeStatic + def _should_ignore(self, file): + ignore_patterns = os.getenv('INPUT_IGNORE', None) + if ignore_patterns: + for pattern in filter(None, [pattern.strip() for pattern in ignore_patterns.split(',')]): + if re.match(pattern, file): + return True + return False + diff --git a/main.py b/main.py index 41151c3..28d2242 100644 --- a/main.py +++ b/main.py @@ -2,913 +2,16 @@ """Convert IDE TODOs to GitHub issues.""" import os -import requests import re -import json from time import sleep from io import StringIO from ruamel.yaml import YAML -from enum import Enum import itertools import operator from collections import defaultdict -from urllib.parse import urlparse - - -class LineStatus(Enum): - """Represents the status of a line in a diff file.""" - ADDED = 0 - DELETED = 1 - UNCHANGED = 2 - - -class Issue(object): - """Basic Issue model for collecting the necessary info to send to GitHub.""" - - def __init__(self, title, labels, assignees, milestone, body, hunk, file_name, - start_line, num_lines, markdown_language, status, identifier, ref, issue_url, issue_number): - self.title = title - self.labels = labels - self.assignees = assignees - self.milestone = milestone - self.body = body - self.hunk = hunk - self.file_name = file_name - self.start_line = start_line - self.num_lines = num_lines - self.markdown_language = markdown_language - self.status = status - self.identifier = identifier - self.ref = ref - self.issue_url = issue_url - self.issue_number = issue_number - - -class GitHubClient(object): - """Basic client for getting the last diff and managing issues.""" - existing_issues = [] - milestones = [] - - def __init__(self): - 
self.github_url = os.getenv('INPUT_GITHUB_URL') - self.base_url = f'{self.github_url}/' - self.repos_url = f'{self.base_url}repos/' - self.repo = os.getenv('INPUT_REPO') - self.before = os.getenv('INPUT_BEFORE') - self.sha = os.getenv('INPUT_SHA') - self.commits = json.loads(os.getenv('INPUT_COMMITS')) or [] - self.diff_url = os.getenv('INPUT_DIFF_URL') - self.token = os.getenv('INPUT_TOKEN') - self.issues_url = f'{self.repos_url}{self.repo}/issues' - self.milestones_url = f'{self.repos_url}{self.repo}/milestones' - self.issue_headers = { - 'Content-Type': 'application/json', - 'Authorization': f'token {self.token}', - 'X-GitHub-Api-Version': '2022-11-28' - } - self.graphql_headers = { - 'Authorization': f'Bearer {os.getenv("INPUT_PROJECTS_SECRET", "")}', - 'Accept': 'application/vnd.github.v4+json' - } - auto_p = os.getenv('INPUT_AUTO_P', 'true') == 'true' - self.line_break = '\n\n' if auto_p else '\n' - self.auto_assign = os.getenv('INPUT_AUTO_ASSIGN', 'false') == 'true' - self.actor = os.getenv('INPUT_ACTOR') - self.insert_issue_urls = os.getenv('INPUT_INSERT_ISSUE_URLS', 'false') == 'true' - if self.base_url == 'https://api.github.com/': - self.line_base_url = 'https://github.com/' - else: - self.line_base_url = self.base_url - self.project = os.getenv('INPUT_PROJECT', None) - # Retrieve the existing repo issues now so we can easily check them later. - self._get_existing_issues() - # Populate milestones so we can perform a lookup if one is specified. - self._get_milestones() - - def get_last_diff(self): - """Get the last diff.""" - if self.diff_url: - # Diff url was directly passed in config, likely due to this being a PR. - diff_url = self.diff_url - elif self.before != '0000000000000000000000000000000000000000': - # There is a valid before SHA to compare with, or this is a release being created. - diff_url = f'{self.repos_url}{self.repo}/compare/{self.before}...{self.sha}' - elif len(self.commits) == 1: - # There is only one commit. 
- diff_url = f'{self.repos_url}{self.repo}/commits/{self.sha}' - else: - # There are several commits: compare with the oldest one. - oldest = sorted(self.commits, key=self._get_timestamp)[0]['id'] - diff_url = f'{self.repos_url}{self.repo}/compare/{oldest}...{self.sha}' - - diff_headers = { - 'Accept': 'application/vnd.github.v3.diff', - 'Authorization': f'token {self.token}', - 'X-GitHub-Api-Version': '2022-11-28' - } - diff_request = requests.get(url=diff_url, headers=diff_headers) - if diff_request.status_code == 200: - return diff_request.text - raise Exception('Could not retrieve diff. Operation will abort.') - - # noinspection PyMethodMayBeStatic - def _get_timestamp(self, commit): - """Get a commit timestamp.""" - return commit.get('timestamp') - - def _get_milestones(self, page=1): - """Get all the milestones.""" - params = { - 'per_page': 100, - 'page': page, - 'state': 'open' - } - milestones_request = requests.get(self.milestones_url, headers=self.issue_headers, params=params) - if milestones_request.status_code == 200: - self.milestones.extend(milestones_request.json()) - links = milestones_request.links - if 'next' in links: - self._get_milestones(page + 1) - - def _get_milestone(self, title): - """Get the milestone number for the one with this title (creating one if it doesn't exist).""" - for m in self.milestones: - if m['title'] == title: - return m['number'] - else: - return self._create_milestone(title) - - def _create_milestone(self, title): - """Create a new milestone with this title.""" - milestone_data = { - 'title': title - } - milestone_request = requests.post(self.milestones_url, headers=self.issue_headers, json=milestone_data) - return milestone_request.json()['number'] if milestone_request.status_code == 201 else None - - def _get_existing_issues(self, page=1): - """Populate the existing issues list.""" - params = { - 'per_page': 100, - 'page': page, - 'state': 'open' - } - list_issues_request = requests.get(self.issues_url, 
headers=self.issue_headers, params=params) - if list_issues_request.status_code == 200: - self.existing_issues.extend(list_issues_request.json()) - links = list_issues_request.links - if 'next' in links: - self._get_existing_issues(page + 1) - - def _get_project_id(self, project): - """Get the project ID.""" - project_type, owner, project_name = project.split('/') - if project_type == 'user': - query = """ - query($owner: String!) { - user(login: $owner) { - projectsV2(first: 10) { - nodes { - id - title - } - } - } - } - """ - elif project_type == 'organization': - query = """ - query($owner: String!) { - organization(login: $owner) { - projectsV2(first: 10) { - nodes { - id - title - } - } - } - } - """ - else: - print("Invalid project type") - return None - - variables = { - 'owner': owner, - } - project_request = requests.post('https://api.github.com/graphql', - json={'query': query, 'variables': variables}, - headers=self.graphql_headers) - if project_request.status_code == 200: - projects = (project_request.json().get('data', {}).get(project_type, {}).get('projectsV2', {}) - .get('nodes', [])) - for project in projects: - if project['title'] == project_name: - return project['id'] - return None - - def _get_issue_global_id(self, owner, repo, issue_number): - """Get the global ID for a given issue.""" - query = """ - query($owner: String!, $repo: String!, $issue_number: Int!) 
{ - repository(owner: $owner, name: $repo) { - issue(number: $issue_number) { - id - } - } - } - """ - variables = { - 'owner': owner, - 'repo': repo, - 'issue_number': issue_number - } - project_request = requests.post('https://api.github.com/graphql', - json={'query': query, 'variables': variables}, - headers=self.graphql_headers) - if project_request.status_code == 200: - return project_request.json()['data']['repository']['issue']['id'] - return None - - def _add_issue_to_project(self, issue_id, project_id): - """Attempt to add this issue to a project.""" - mutation = """ - mutation($projectId: ID!, $contentId: ID!) { - addProjectV2ItemById(input: {projectId: $projectId, contentId: $contentId}) { - item { - id - } - } - } - """ - variables = { - "projectId": project_id, - "contentId": issue_id - } - project_request = requests.post('https://api.github.com/graphql', - json={'query': mutation, 'variables': variables}, - headers=self.graphql_headers) - return project_request.status_code - - def _comment_issue(self, issue_number, comment): - """Post a comment on an issue.""" - issue_comment_url = f'{self.repos_url}{self.repo}/issues/{issue_number}/comments' - body = {'body': comment} - update_issue_request = requests.post(issue_comment_url, headers=self.issue_headers, json=body) - return update_issue_request.status_code - - def create_issue(self, issue): - """Create a dict containing the issue details and send it to GitHub.""" - formatted_issue_body = self.line_break.join(issue.body) - line_num_anchor = f'#L{issue.start_line}' - if issue.num_lines > 1: - line_num_anchor += f'-L{issue.start_line + issue.num_lines - 1}' - url_to_line = f'{self.line_base_url}{self.repo}/blob/{self.sha}/{issue.file_name}{line_num_anchor}' - snippet = '```' + issue.markdown_language + '\n' + issue.hunk + '\n' + '```' - - issue_template = os.getenv('INPUT_ISSUE_TEMPLATE', None) - if issue_template: - issue_contents = (issue_template.replace('{{ title }}', issue.title) - .replace('{{ body 
}}', formatted_issue_body) - .replace('{{ url }}', url_to_line) - .replace('{{ snippet }}', snippet) - ) - elif len(issue.body) != 0: - issue_contents = formatted_issue_body + '\n\n' + url_to_line + '\n\n' + snippet - else: - issue_contents = url_to_line + '\n\n' + snippet - - endpoint = self.issues_url - if issue.issue_url: - # Issue already exists, update existing rather than create new. - endpoint += f'/{issue.issue_number}' - - title = issue.title - - if issue.ref: - if issue.ref.startswith('@'): - # Ref = assignee. - issue.assignees.append(issue.ref.lstrip('@')) - elif issue.ref.startswith('!'): - # Ref = label. - issue.labels.append(issue.ref.lstrip('!')) - elif issue.ref.startswith('#'): - # Ref = issue number (indicating this is a comment on that issue). - issue_number = issue.ref.lstrip('#') - if issue_number.isdigit(): - # Create the comment now. - return self._comment_issue(issue_number, f'{issue.title}\n\n{issue_contents}'), None - else: - # Just prepend the ref to the title. - title = f'[{issue.ref}] {issue.title}' - - title = title + '...' if len(title) > 80 else title - new_issue_body = {'title': title, 'body': issue_contents, 'labels': issue.labels} - - # We need to check if any assignees/milestone specified exist, otherwise issue creation will fail. - valid_assignees = [] - if len(issue.assignees) == 0 and self.auto_assign: - valid_assignees.append(self.actor) - for assignee in issue.assignees: - assignee_url = f'{self.repos_url}{self.repo}/assignees/{assignee}' - assignee_request = requests.get(url=assignee_url, headers=self.issue_headers) - if assignee_request.status_code == 204: - valid_assignees.append(assignee) - else: - print(f'Assignee {assignee} does not exist! 
Dropping this assignee!') - new_issue_body['assignees'] = valid_assignees - - if issue.milestone: - milestone_number = self._get_milestone(issue.milestone) - if milestone_number: - new_issue_body['milestone'] = milestone_number - else: - print(f'Milestone {issue.milestone} could not be set. Dropping this milestone!') - - if issue.issue_url: - # Update existing issue. - issue_request = requests.patch(url=endpoint, headers=self.issue_headers, json=new_issue_body) - else: - # Create new issue. - issue_request = requests.post(url=endpoint, headers=self.issue_headers, json=new_issue_body) - - request_status = issue_request.status_code - issue_number = issue_request.json()['number'] if request_status in [200, 201] else None - - # Check if issue should be added to a project now it exists. - if issue_number and self.project: - project_id = self._get_project_id(self.project) - if project_id: - owner, repo = self.repo.split('/') - issue_id = self._get_issue_global_id(owner, repo, issue_number) - if issue_id: - self._add_issue_to_project(issue_id, project_id) - - return request_status, issue_number - - def close_issue(self, issue): - """Check to see if this issue can be found on GitHub and if so close it.""" - issue_number = None - if issue.issue_number: - # If URL insertion is enabled. - issue_number = issue.issue_number - else: - # Try simple matching. - matched = 0 - for existing_issue in self.existing_issues: - if existing_issue['title'] == issue.title: - matched += 1 - # If there are multiple issues with similar titles, don't try and close any. - if matched > 1: - print(f'Skipping issue (multiple matches)') - break - issue_number = existing_issue['number'] - if issue_number: - update_issue_url = f'{self.issues_url}/{issue_number}' - body = {'state': 'closed'} - requests.patch(update_issue_url, headers=self.issue_headers, json=body) - request_status = self._comment_issue(issue_number, f'Closed in {self.sha}.') - - # Update the description if this is a PR. 
- if os.getenv('GITHUB_EVENT_NAME') == 'pull_request': - pr_number = os.getenv('PR_NUMBER') - if pr_number: - request_status = self._update_pr_body(pr_number, body) - return request_status - return None - - def _update_pr_body(self, pr_number, issue_number): - """Add a close message for an issue to a PR.""" - pr_url = f'{self.repos_url}{self.repo}/pulls/{pr_number}' - pr_request = requests.get(pr_url, headers=self.issue_headers) - if pr_request.status_code == 200: - pr_body = pr_request.json()['body'] - close_message = f'Closes #{issue_number}' - if close_message not in pr_body: - updated_pr_body = f'{pr_body}\n\n{close_message}' if pr_body.strip() else close_message - body = {'body': updated_pr_body} - pr_update_request = requests.patch(pr_url, headers=self.issue_headers, json=body) - return pr_update_request.status_code - return pr_request.status_code - - -class TodoParser(object): - """Parser for extracting information from a given diff file.""" - FILE_HUNK_PATTERN = r'(?<=diff)(.*?)(?=diff\s--git\s)' - HEADER_PATTERN = r'(?<=--git).*?(?=$\n(index|new|deleted))' - LINE_PATTERN = r'^.*$' - FILENAME_PATTERN = re.compile(r'(?<=a/).+?(?=\sb/)') - LINE_NUMBERS_PATTERN = re.compile(r'@@[\d\s,\-+]*\s@@.*') - LINE_NUMBERS_INNER_PATTERN = re.compile(r'@@[\d\s,\-+]*\s@@') - ADDITION_PATTERN = re.compile(r'(?<=^\+).*') - DELETION_PATTERN = re.compile(r'(?<=^-).*') - REF_PATTERN = re.compile(r'.+?(?=\))') - LABELS_PATTERN = re.compile(r'(?<=labels:\s).+', re.IGNORECASE) - ASSIGNEES_PATTERN = re.compile(r'(?<=assignees:\s).+', re.IGNORECASE) - MILESTONE_PATTERN = re.compile(r'(?<=milestone:\s).+', re.IGNORECASE) - ISSUE_URL_PATTERN = re.compile(r'(?<=Issue URL:\s).+', re.IGNORECASE) - ISSUE_NUMBER_PATTERN = re.compile(r'/issues/(\d+)', re.IGNORECASE) - - def __init__(self): - # Determine if the issues should be escaped. - self.should_escape = os.getenv('INPUT_ESCAPE', 'true') == 'true' - # Load any custom identifiers, otherwise use the default. 
- custom_identifiers = os.getenv('INPUT_IDENTIFIERS') - self.identifiers = ['TODO'] - self.identifiers_dict = None - if custom_identifiers: - try: - custom_identifiers_dict = json.loads(custom_identifiers) - for identifier_dict in custom_identifiers_dict: - if type(identifier_dict['name']) is not str or type(identifier_dict['labels']) is not list: - raise TypeError - self.identifiers = [identifier['name'] for identifier in custom_identifiers_dict] - self.identifiers_dict = custom_identifiers_dict - except (json.JSONDecodeError, KeyError, TypeError): - print('Invalid identifiers dict, ignoring.') - - self.languages_dict = None - # Check if the standard collections should be loaded. - if os.getenv('INPUT_NO_STANDARD', 'false') != 'true': - # Load the languages data for ascertaining file types. - languages_url = 'https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml' - languages_request = requests.get(url=languages_url) - if languages_request.status_code == 200: - languages_data = languages_request.text - yaml = YAML(typ='safe') - self.languages_dict = yaml.load(languages_data) - else: - raise Exception('Cannot retrieve languages data. Operation will abort.') - - # Load the comment syntax data for identifying comments. - syntax_url = 'https://raw.githubusercontent.com/alstr/todo-to-issue-action/master/syntax.json' - syntax_request = requests.get(url=syntax_url) - if syntax_request.status_code == 200: - self.syntax_dict = syntax_request.json() - else: - raise Exception('Cannot retrieve syntax data. Operation will abort.') - else: - self.syntax_dict = [] - self.languages_dict = {} - - custom_languages = os.getenv('INPUT_LANGUAGES', '') - if custom_languages != '': - # Load all custom languages. - for path in custom_languages.split(','): - # noinspection PyBroadException - try: - # Decide if the path is a url or local file. 
- if path.startswith('http'): - languages_request = requests.get(path) - if languages_request.status_code != 200: - print(f'Cannot retrieve custom language file "{path}".') - continue - data = languages_request.json() - else: - path = os.path.join(os.getcwd(), path) - if not os.path.exists(path) or not os.path.isfile(path): - print(f'Cannot retrieve custom language file "{path}".') - continue - with open(path) as f: - data = json.load(f) - - # Iterate through the definitions. - for lang in data: - # Add/replace the language definition. - self.languages_dict[lang['language']] = {} - self.languages_dict[lang['language']]['type'] = '' - self.languages_dict[lang['language']]['color'] = '' - self.languages_dict[lang['language']]['extensions'] = lang['extensions'] - self.languages_dict[lang['language']]['source'] = '' - self.languages_dict[lang['language']]['ace_mode'] = 'text' - self.languages_dict[lang['language']]['language_id'] = 0 - - # Check if comment syntax for the language name already exists. - counter = 0 - exists = False - for syntax in self.syntax_dict: - if syntax['language'] == lang['language']: - exists = True - break - - counter = counter + 1 - - if exists: - # When the syntax exists it will be popped out of the list. - self.syntax_dict.pop(counter) - - # And be replaced with the new syntax definition. - self.syntax_dict.append({ - 'language': lang['language'], - 'markers': lang['markers'] - }) - except Exception: - print(f'An error occurred in the custom language file "{path}".') - print('Please check the file, or if it represents undefined behavior, ' - 'create an issue at https://github.com/alstr/todo-to-issue-action/issues.') - - # noinspection PyTypeChecker - def parse(self, diff_file): - issues = [] - - # The parser works by gradually breaking the diff file down into smaller and smaller segments. - # At each level relevant information is extracted. - - # First separate the diff into sections for each changed file. 
- file_hunks = re.finditer(self.FILE_HUNK_PATTERN, diff_file.read(), re.DOTALL) - last_end = None - extracted_file_hunks = [] - for i, file_hunk in enumerate(file_hunks): - extracted_file_hunks.append(file_hunk.group(0)) - last_end = file_hunk.end() - diff_file.seek(0) - extracted_file_hunks.append(diff_file.read()[last_end:]) - diff_file.close() - - code_blocks = [] - prev_block = None - # Iterate through each section extracted above. - for hunk in extracted_file_hunks: - # Extract the file information so we can figure out the Markdown language and comment syntax. - header_search = re.search(self.HEADER_PATTERN, hunk, re.MULTILINE) - if not header_search: - continue - files = header_search.group(0) - - filename_search = re.search(self.FILENAME_PATTERN, files) - if not filename_search: - continue - curr_file = filename_search.group(0) - if self._should_ignore(curr_file): - continue - curr_markers, curr_markdown_language = self._get_file_details(curr_file) - if not curr_markers or not curr_markdown_language: - print(f'Could not check "{curr_file}" for TODOs as this language is not yet supported by default.') - continue - - # Break this section down into individual changed code blocks. - line_numbers_iterator = re.finditer(self.LINE_NUMBERS_PATTERN, hunk) - for i, line_numbers in enumerate(line_numbers_iterator): - line_numbers_inner_search = re.search(self.LINE_NUMBERS_INNER_PATTERN, line_numbers.group(0)) - line_numbers_str = line_numbers_inner_search.group(0).strip('@@ -') - start_line = line_numbers_str.split(' ')[1].strip('+') - start_line = int(start_line.split(',')[0]) - - # Put this information into a temporary dict for simplicity. - block = { - 'file': curr_file, - 'markers': curr_markers, - 'markdown_language': curr_markdown_language, - 'start_line': start_line, - 'hunk': hunk, - 'hunk_start': line_numbers.end(), - 'hunk_end': None - } - - prev_index = len(code_blocks) - 1 - # Set the end of the last code block based on the start of this one. 
- if prev_block and prev_block['file'] == block['file']: - # noinspection PyTypedDict - code_blocks[prev_index]['hunk_end'] = line_numbers.start() - code_blocks[prev_index]['hunk'] = (prev_block['hunk'] - [prev_block['hunk_start']:line_numbers.start()]) - elif prev_block: - code_blocks[prev_index]['hunk'] = prev_block['hunk'][prev_block['hunk_start']:] - - code_blocks.append(block) - prev_block = block - - if len(code_blocks) > 0: - last_index = len(code_blocks) - 1 - last_block = code_blocks[last_index] - code_blocks[last_index]['hunk'] = last_block['hunk'][last_block['hunk_start']:] - - # Now for each code block, check for comments, then those comments for TODOs. - for block in code_blocks: - for marker in block['markers']: - # Check if there are line or block comments. - if marker['type'] == 'line': - # Add a negative lookup to include the second character from alternative comment patterns. - # This step is essential to handle cases like in Julia, where '#' and '#=' are comment patterns. - # It ensures that when a space after the comment is optional ('\s' => '\s*'), - # the second character would be matched because of the any character expression ('.+'). - suff_escape_list = [] - pref_escape_list = [] - for to_escape in block['markers']: - if to_escape['type'] == 'line': - if to_escape['pattern'] == marker['pattern']: - continue - if marker['pattern'][0] == to_escape['pattern'][0]: - suff_escape_list.append(self._extract_character(to_escape['pattern'], 1)) - else: - # Block comments and line comments cannot have the same comment pattern, - # so a check if the string is the same is unnecessary. - if to_escape['pattern']['start'][0] == marker['pattern'][0]: - suff_escape_list.append(self._extract_character(to_escape['pattern']['start'], 1)) - search = to_escape['pattern']['end'].find(marker['pattern']) - if search != -1: - pref_escape_list.append(self._extract_character(to_escape['pattern']['end'], - search - 1)) - - comment_pattern = (r'(^.*' - + (r'(? 
0 - else '') - + marker['pattern'] - + (r'(?!(' + '|'.join(suff_escape_list) + r'))' if len(suff_escape_list) > 0 - else '') - + r'\s*.+$)') - comments = re.finditer(comment_pattern, block['hunk'], re.MULTILINE) - extracted_comments = [] - prev_comment = None - for i, comment in enumerate(comments): - if prev_comment and comment.start() == prev_comment.end() + 1: - extracted_comments[len(extracted_comments) - 1].append(comment) - else: - extracted_comments.append([comment]) - prev_comment = comment - for comment in extracted_comments: - extracted_issues = self._extract_issue_if_exists(comment, marker, block) - if extracted_issues: - issues.extend(extracted_issues) - else: - comment_pattern = (r'(?:[+\-\s]\s*' + marker['pattern']['start'] + r'.*?' - + marker['pattern']['end'] + ')') - comments = re.finditer(comment_pattern, block['hunk'], re.DOTALL) - extracted_comments = [] - for i, comment in enumerate(comments): - if re.search('|'.join(self.identifiers), comment.group(0)): - extracted_comments.append([comment]) - - for comment in extracted_comments: - extracted_issues = self._extract_issue_if_exists(comment, marker, block) - if extracted_issues: - issues.extend(extracted_issues) - - for i, issue in enumerate(issues): - # Strip some of the diff symbols so it can be included as a code snippet in the issue body. - # Strip removed lines. - cleaned_hunk = re.sub(r'\n^-.*$', '', issue.hunk, 0, re.MULTILINE) - # Strip leading symbols/whitespace. - cleaned_hunk = re.sub(r'^.', '', cleaned_hunk, 0, re.MULTILINE) - # Strip newline message. 
- cleaned_hunk = re.sub(r'\n\sNo newline at end of file', '', cleaned_hunk, 0, re.MULTILINE) - issue.hunk = cleaned_hunk - - return issues - - def _get_language_details(self, language_name, attribute, value): - """Try and get the Markdown language and comment syntax data based on a specified attribute of the language.""" - attributes = [at.lower() for at in self.languages_dict[language_name][attribute]] - if value.lower() in attributes: - for syntax_details in self.syntax_dict: - if syntax_details['language'] == language_name: - return syntax_details['markers'], self.languages_dict[language_name]['ace_mode'] - return None, None - - def _get_file_details(self, file): - """Try and get the Markdown language and comment syntax data for the given file.""" - file_name, extension = os.path.splitext(os.path.basename(file)) - for language_name in self.languages_dict: - # Check if the file extension matches the language's extensions. - if extension != '' and 'extensions' in self.languages_dict[language_name]: - syntax_details, ace_mode = self._get_language_details(language_name, 'extensions', extension) - if syntax_details is not None and ace_mode is not None: - return syntax_details, ace_mode - # Check if the file name matches the language's filenames. 
- if 'filenames' in self.languages_dict[language_name]: - syntax_details, ace_mode = self._get_language_details(language_name, 'filenames', file_name) - if syntax_details is not None and ace_mode is not None: - return syntax_details, ace_mode - return None, None - - def _extract_issue_if_exists(self, comment, marker, code_block): - """Check this comment for TODOs, and if found, build an Issue object.""" - curr_issue = None - found_issues = [] - line_statuses = [] - prev_line_title = False - for match in comment: - comment_lines = match.group().split('\n') - for line in comment_lines: - line_status, committed_line = self._get_line_status(line) - line_statuses.append(line_status) - cleaned_line = self._clean_line(committed_line, marker) - line_title, ref, identifier = self._get_title(cleaned_line) - if line_title: - if prev_line_title and line_status == line_statuses[-2]: - # This means that there is a separate one-line TODO directly above this one. - # We need to store the previous one. - curr_issue.status = line_status - found_issues.append(curr_issue) - curr_issue = Issue( - title=line_title, - labels=[], - assignees=[], - milestone=None, - body=[], - hunk=code_block['hunk'], - file_name=code_block['file'], - start_line=code_block['start_line'], - num_lines=1, - markdown_language=code_block['markdown_language'], - status=line_status, - identifier=identifier, - ref=ref, - issue_url=None, - issue_number=None - ) - prev_line_title = True - - # Calculate the file line number that this issue references. - hunk_lines = re.finditer(self.LINE_PATTERN, code_block['hunk'], re.MULTILINE) - start_line = code_block['start_line'] - for i, hunk_line in enumerate(hunk_lines): - if hunk_line.group(0) == line: - curr_issue.start_line = start_line - break - if i != 0 and (hunk_line.group(0).startswith('+') or not hunk_line.group(0).startswith('-')): - start_line += 1 - - elif curr_issue: - # Extract other issue information that may exist below the title. 
- line_labels = self._get_labels(cleaned_line) - line_assignees = self._get_assignees(cleaned_line) - line_milestone = self._get_milestone(cleaned_line) - line_url = self._get_issue_url(cleaned_line) - if line_labels: - curr_issue.labels.extend(line_labels) - elif line_assignees: - curr_issue.assignees.extend(line_assignees) - elif line_milestone: - curr_issue.milestone = line_milestone - elif line_url: - curr_issue.issue_url = line_url - issue_number_search = self.ISSUE_NUMBER_PATTERN.search(line_url) - if issue_number_search: - curr_issue.issue_number = issue_number_search.group(1) - elif len(cleaned_line) and line_status != LineStatus.DELETED: - if self.should_escape: - curr_issue.body.append(self._escape_markdown(cleaned_line)) - else: - curr_issue.body.append(cleaned_line) - if not line.startswith('-'): - curr_issue.num_lines += 1 - if not line_title: - prev_line_title = False - if curr_issue is not None and curr_issue.identifier is not None and self.identifiers_dict is not None: - for identifier_dict in self.identifiers_dict: - if identifier_dict['name'] == curr_issue.identifier: - for label in identifier_dict['labels']: - if label not in curr_issue.labels: - curr_issue.labels.append(label) - - if curr_issue is not None: - # If all the lines are unchanged, don't do anything. - if all(s == LineStatus.UNCHANGED for s in line_statuses): - return None - # If the title line hasn't changed, but the info below has, we need to mark it as an update (addition). 
- if (curr_issue.status == LineStatus.UNCHANGED - and (LineStatus.ADDED in line_statuses or LineStatus.DELETED in line_statuses)): - curr_issue.status = LineStatus.ADDED - - found_issues.append(curr_issue) - - return found_issues - - @staticmethod - def _escape_markdown(comment): - # All basic characters according to: https://www.markdownguide.org/basic-syntax - must_escape = ['\\', '<', '>', '#', '`', '*', '_', '[', ']', '(', ')', '!', '+', '-', '.', '|', '{', '}', '~', - '='] - - escaped = '' - - # Linear Escape Algorithm, because the algorithm ends in an infinite loop when using the function 'replace', - # which tries to replace all backslashes with duplicate backslashes, i.e. also the already other escaped - # characters. - for c in comment: - if c in must_escape: - escaped += '\\' + c - else: - escaped += c - return escaped - - @staticmethod - def _extract_character(input_str, pos): - # Extracts a character from the input string at the specified position, - # considering escape sequences when applicable. 
- # Test cases - # print(_extract_character("/\\*", 1)) # Output: "\*" - # print(_extract_character("\\*", 0)) # Output: "\*" - # print(_extract_character("\\", 0)) # Output: "\\" - # print(_extract_character("w", 0)) # Output: "w" - # print(_extract_character("wa", 1)) # Output: "a" - # print(_extract_character("\\\\w", 1)) # Output: "\\" - if input_str[pos] == '\\': - if pos >= 1 and not input_str[pos - 1] == '\\' and len(input_str) > pos + 1: - return '\\' + input_str[pos + 1] - return '\\\\' - if pos >= 1: - if input_str[pos - 1] == '\\': - return '\\' + input_str[pos] - return input_str[pos] - - def _get_line_status(self, comment): - """Return a Tuple indicating whether this is an addition/deletion/unchanged, plus the cleaned comment.""" - addition_search = self.ADDITION_PATTERN.search(comment) - if addition_search: - return LineStatus.ADDED, addition_search.group(0) - else: - deletion_search = self.DELETION_PATTERN.search(comment) - if deletion_search: - return LineStatus.DELETED, deletion_search.group(0) - return LineStatus.UNCHANGED, comment[1:] - - @staticmethod - def _clean_line(comment, marker): - """Remove unwanted symbols and whitespace.""" - comment = comment.strip() - if marker['type'] == 'block': - start_pattern = r'^' + marker['pattern']['start'] - end_pattern = marker['pattern']['end'] + r'$' - comment = re.sub(start_pattern, '', comment) - comment = re.sub(end_pattern, '', comment) - # Some block comments might have an asterisk on each line. 
- if '*' in start_pattern and comment.startswith('*'): - comment = comment.lstrip('*') - else: - pattern = r'^' + marker['pattern'] - comment = re.sub(pattern, '', comment) - return comment.strip() - - def _get_title(self, comment): - """Check the passed comment for a new issue title (and reference, if specified).""" - title = None - ref = None - title_identifier = None - for identifier in self.identifiers: - title_identifier = identifier - title_pattern = re.compile(fr'(?<={identifier}[\s:]).+', re.IGNORECASE) - title_search = title_pattern.search(comment, re.IGNORECASE) - if title_search: - title = title_search.group(0).strip(': ') - break - else: - title_ref_pattern = re.compile(fr'(?<={identifier}\().+', re.IGNORECASE) - title_ref_search = title_ref_pattern.search(comment, re.IGNORECASE) - if title_ref_search: - title = title_ref_search.group(0).strip() - ref_search = self.REF_PATTERN.search(title) - if ref_search: - ref = ref_search.group(0) - title = title.replace(ref, '', 1).lstrip(':) ') - break - return title, ref, title_identifier - - def _get_issue_url(self, comment): - """Check the passed comment for a GitHub issue URL.""" - url_search = self.ISSUE_URL_PATTERN.search(comment, re.IGNORECASE) - url = None - if url_search: - url = url_search.group(0) - parsed_url = urlparse(url) - return url if all([parsed_url.scheme, parsed_url.netloc]) else None - return url - - def _get_labels(self, comment): - """Check the passed comment for issue labels.""" - labels_search = self.LABELS_PATTERN.search(comment, re.IGNORECASE) - labels = [] - if labels_search: - labels = labels_search.group(0).replace(', ', ',') - labels = list(filter(None, labels.split(','))) - return labels - - def _get_assignees(self, comment): - """Check the passed comment for issue assignees.""" - assignees_search = self.ASSIGNEES_PATTERN.search(comment, re.IGNORECASE) - assignees = [] - if assignees_search: - assignees = assignees_search.group(0).replace(', ', ',') - assignees = list(filter(None, 
assignees.split(','))) - return assignees - - def _get_milestone(self, comment): - """Check the passed comment for a milestone.""" - milestone_search = self.MILESTONE_PATTERN.search(comment, re.IGNORECASE) - milestone = None - if milestone_search: - milestone = milestone_search.group(0) - return milestone - - # noinspection PyMethodMayBeStatic - def _should_ignore(self, file): - ignore_patterns = os.getenv('INPUT_IGNORE', None) - if ignore_patterns: - for pattern in filter(None, [pattern.strip() for pattern in ignore_patterns.split(',')]): - if re.match(pattern, file): - return True - return False +from TodoParser import * +from LineStatus import * +from GitHubClient import * if __name__ == "__main__": From 714153eaf3802101df34c0d99395db1a297756c8 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Thu, 24 Oct 2024 22:55:44 +0000 Subject: [PATCH 02/20] feat: add Local client This is just a safe fallback for local testing. If environment variables which activate the creation of the GitHub client aren't set, then the Local client is created. It acts on the most recent commit of the repo in the working directory. Minor edit to GitHubClient so that it raises an EnvironmentError exception if INPUT_GITHUB_URL environment variable is not defined. 
This allows main to detect the error and fall back to trying to use the Local client --- GitHubClient.py | 2 ++ LocalClient.py | 16 ++++++++++++++++ main.py | 13 +++++++++++-- 3 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 LocalClient.py diff --git a/GitHubClient.py b/GitHubClient.py index fa5453b..5513f9d 100644 --- a/GitHubClient.py +++ b/GitHubClient.py @@ -9,6 +9,8 @@ class GitHubClient(object): def __init__(self): self.github_url = os.getenv('INPUT_GITHUB_URL') + if not self.github_url: + raise EnvironmentError self.base_url = f'{self.github_url}/' self.repos_url = f'{self.base_url}repos/' self.repo = os.getenv('INPUT_REPO') diff --git a/LocalClient.py b/LocalClient.py new file mode 100644 index 0000000..9d0b606 --- /dev/null +++ b/LocalClient.py @@ -0,0 +1,16 @@ +import subprocess + +class LocalClient(object): + def __init__(self): + self.diff_url = None + self.commits = ['placeholder'] # content doesn't matter, just length + self.insert_issue_urls = False + + def get_last_diff(self): + return subprocess.run(['git', 'diff', 'HEAD^..HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8') + + def create_issue(self, issue): + return [201, None] + + def close_issue(self, issue): + return 200 diff --git a/main.py b/main.py index 28d2242..dd1d7c6 100644 --- a/main.py +++ b/main.py @@ -11,12 +11,21 @@ import operator from collections import defaultdict from TodoParser import * from LineStatus import * +from LocalClient import * from GitHubClient import * if __name__ == "__main__": - # Create a basic client for communicating with GitHub, automatically initialised with environment variables. - client = GitHubClient() + # Try to create a basic client for communicating with the remote version control server, automatically initialised with environment variables. 
+ try: + # try to build a GitHub client + client = GitHubClient() + except EnvironmentError: + # don't immediately give up + client = None + # if needed, fall back to using a local client for testing + client = client or LocalClient() + # Check to see if the workflow has been run manually. # If so, adjust the client SHA and diff URL to use the manually supplied inputs. manual_commit_ref = os.getenv('MANUAL_COMMIT_REF') From c05a9e3f1acc5793ffc71c493b285c45f793c497 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Thu, 24 Oct 2024 22:36:26 +0000 Subject: [PATCH 03/20] refactor: move GitHub-specific code into class Add a new method, get_issue_url(), which returns the VCS-specific web URL for viewing an issue. This results in moving GitHub-specific code from main into GitHubClient, but with no change in behavior. --- GitHubClient.py | 2 ++ main.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/GitHubClient.py b/GitHubClient.py index 5513f9d..dea1daa 100644 --- a/GitHubClient.py +++ b/GitHubClient.py @@ -351,3 +351,5 @@ class GitHubClient(object): return pr_update_request.status_code return pr_request.status_code + def get_issue_url(self, new_issue_number): + return f'Issue URL: {self.line_base_url}{self.repo}/issues/{new_issue_number}' diff --git a/main.py b/main.py index dd1d7c6..7f7568b 100644 --- a/main.py +++ b/main.py @@ -102,7 +102,7 @@ if __name__ == "__main__": # Duplicate the line to retain the comment syntax. new_line = file_lines[line_number] remove = fr'{raw_issue.identifier}.*{raw_issue.title}' - insert = f'Issue URL: {client.line_base_url}{client.repo}/issues/{new_issue_number}' + insert = client.get_issue_url(new_issue_number) new_line = re.sub(remove, insert, new_line) # Check if the URL line already exists, if so abort. 
if line_number == len(file_lines) - 1 or file_lines[line_number + 1] != new_line: From 3753baeb0aa33a1f6c20ac6c80809d6d11748178 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Fri, 25 Oct 2024 02:48:33 +0000 Subject: [PATCH 04/20] fix: copy all modules into Docker image --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 0897921..5ab27cf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM python:3-slim AS builder -ADD main.py /app/main.py +ADD *.py /app/ WORKDIR /app RUN pip install --target=/app requests From 9b24f44c630cdf4403e20bb37754220b533fd99d Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Fri, 25 Oct 2024 18:04:21 +0000 Subject: [PATCH 05/20] refactor: clean up imports --- TodoParser.py | 4 ++-- main.py | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/TodoParser.py b/TodoParser.py index ffcf242..33006c4 100644 --- a/TodoParser.py +++ b/TodoParser.py @@ -2,8 +2,8 @@ import os import re import requests from ruamel.yaml import YAML -from LineStatus import * -from Issue import * +from LineStatus import LineStatus +from Issue import Issue import requests import json from urllib.parse import urlparse diff --git a/main.py b/main.py index 7f7568b..b0dd63f 100644 --- a/main.py +++ b/main.py @@ -5,14 +5,13 @@ import os import re from time import sleep from io import StringIO -from ruamel.yaml import YAML import itertools import operator from collections import defaultdict -from TodoParser import * -from LineStatus import * -from LocalClient import * -from GitHubClient import * +from TodoParser import TodoParser +from LineStatus import LineStatus +from LocalClient import LocalClient +from GitHubClient import GitHubClient if __name__ == "__main__": From d6553dba6928352c306a0160e0f749151aa8ec0a Mon Sep 17 00:00:00 2001 From: Alastair Mooney Date: Fri, 25 Oct 2024 19:42:25 +0100 
Subject: [PATCH 06/20] Update TodoParser.py --- TodoParser.py | 1 - 1 file changed, 1 deletion(-) diff --git a/TodoParser.py b/TodoParser.py index 33006c4..6101704 100644 --- a/TodoParser.py +++ b/TodoParser.py @@ -1,6 +1,5 @@ import os import re -import requests from ruamel.yaml import YAML from LineStatus import LineStatus from Issue import Issue From 2157658a7da0a0b3c0a53b87f2745a99b23db524 Mon Sep 17 00:00:00 2001 From: Alastair Mooney Date: Fri, 25 Oct 2024 19:43:01 +0100 Subject: [PATCH 07/20] Update main.py --- main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/main.py b/main.py index b0dd63f..49c5ced 100644 --- a/main.py +++ b/main.py @@ -1,8 +1,6 @@ # -*- coding: utf-8 -*- """Convert IDE TODOs to GitHub issues.""" -import os -import re from time import sleep from io import StringIO import itertools From 67fd1f496c15461a6d95d321ef5d7cef12cf228e Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Fri, 25 Oct 2024 14:20:23 +0000 Subject: [PATCH 08/20] fix: reset modified environment variables after test CustomLanguageTest was modifying a few environment variables and not restoring them when complete. This led to problems for later tests that expected those variables to be at their default values. 
--- tests/test_todo_parser.py | 63 +++++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 25 deletions(-) diff --git a/tests/test_todo_parser.py b/tests/test_todo_parser.py index bf3d5a8..df25c9c 100644 --- a/tests/test_todo_parser.py +++ b/tests/test_todo_parser.py @@ -256,21 +256,34 @@ class EscapeMarkdownTest(unittest.TestCase): self.assertEqual(issue.body[1], '\\') -class CustomLanguageTest(unittest.TestCase): - def test_custom_lang_load(self): +class BaseCustomLanguageTests: + class BaseTest(unittest.TestCase): + @staticmethod + def count_syntax(parser: TodoParser, name: str): + counter = 0 + + for syntax in parser.syntax_dict: + if syntax['language'] == name: + counter = counter + 1 + + return counter + + +class CustomLanguageFileTest(BaseCustomLanguageTests.BaseTest): + def setUp(self): os.environ['INPUT_LANGUAGES'] = 'tests/custom_languages.json' - parser = TodoParser() + self.parser = TodoParser() + + def test_custom_lang_load(self): # Test if the custom language ILS is actually loaded into the system - self.assertIsNotNone(parser.languages_dict['ILS']) - self.assertEqual(self.count_syntax(parser, 'ILS'), 1) + self.assertIsNotNone(self.parser.languages_dict['ILS']) + self.assertEqual(self.count_syntax(self.parser, 'ILS'), 1) def test_custom_lang_not_dupplicate(self): - os.environ['INPUT_LANGUAGES'] = 'tests/custom_languages.json' - parser = TodoParser() # Test if a custom language can overwrite the rules of an existing one - self.assertEqual(self.count_syntax(parser, 'Java'), 1) - for syntax in parser.syntax_dict: + self.assertEqual(self.count_syntax(self.parser, 'Java'), 1) + for syntax in self.parser.syntax_dict: if syntax['language'] == 'Java': self.assertEqual(len(syntax['markers']), 2) self.assertEqual(syntax['markers'][0]['pattern'], "////") @@ -278,24 +291,24 @@ class CustomLanguageTest(unittest.TestCase): self.assertEqual(syntax['markers'][1]['pattern']['end'], '=+') break - self.assertIsNotNone(parser.languages_dict['Java']) - 
self.assertEqual(len(parser.languages_dict['Java']['extensions']), 1) - self.assertEqual(parser.languages_dict['Java']['extensions'][0], ".java2") + self.assertIsNotNone(self.parser.languages_dict['Java']) + self.assertEqual(len(self.parser.languages_dict['Java']['extensions']), 1) + self.assertEqual(self.parser.languages_dict['Java']['extensions'][0], ".java2") - def test_url_load(self): + def tearDown(self): + del os.environ['INPUT_LANGUAGES'] + + +class CustomLanguageUrlTest(BaseCustomLanguageTests.BaseTest): + def setUp(self): os.environ['INPUT_LANGUAGES'] = 'https://raw.githubusercontent.com/alstr/todo-to-issue-action/master/tests/custom_languages.json' os.environ['INPUT_NO_STANDARD'] = 'true' - parser = TodoParser() + self.parser = TodoParser() - self.assertEqual(len(parser.languages_dict), 2) - self.assertEqual(len(parser.syntax_dict), 2) + def test_url_load(self): + self.assertEqual(len(self.parser.languages_dict), 2) + self.assertEqual(len(self.parser.syntax_dict), 2) - @staticmethod - def count_syntax(parser: TodoParser, name: str): - counter = 0 - - for syntax in parser.syntax_dict: - if syntax['language'] == name: - counter = counter + 1 - - return counter + def tearDown(self): + del os.environ['INPUT_LANGUAGES'] + del os.environ['INPUT_NO_STANDARD'] From b1d707856d7fc93c67b27131776bb520a9e6ec86 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:01:08 +0000 Subject: [PATCH 09/20] Revert "Update main.py" This reverts commit 2157658a7da0a0b3c0a53b87f2745a99b23db524. 
--- main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/main.py b/main.py index 49c5ced..b0dd63f 100644 --- a/main.py +++ b/main.py @@ -1,6 +1,8 @@ # -*- coding: utf-8 -*- """Convert IDE TODOs to GitHub issues.""" +import os +import re from time import sleep from io import StringIO import itertools From 1674ad54d4d1566b6ca18cd2fd2b643e0211dd05 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Sat, 26 Oct 2024 20:22:18 +0000 Subject: [PATCH 10/20] refactor: get_issue_url() returns *just* the URL Leave the extra text to the caller (main) --- GitHubClient.py | 2 +- main.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/GitHubClient.py b/GitHubClient.py index dea1daa..f38db91 100644 --- a/GitHubClient.py +++ b/GitHubClient.py @@ -352,4 +352,4 @@ class GitHubClient(object): return pr_request.status_code def get_issue_url(self, new_issue_number): - return f'Issue URL: {self.line_base_url}{self.repo}/issues/{new_issue_number}' + return f'{self.line_base_url}{self.repo}/issues/{new_issue_number}' diff --git a/main.py b/main.py index b0dd63f..a16f753 100644 --- a/main.py +++ b/main.py @@ -101,7 +101,7 @@ if __name__ == "__main__": # Duplicate the line to retain the comment syntax. new_line = file_lines[line_number] remove = fr'{raw_issue.identifier}.*{raw_issue.title}' - insert = client.get_issue_url(new_issue_number) + insert = f'Issue URL: {client.get_issue_url(new_issue_number)}' new_line = re.sub(remove, insert, new_line) # Check if the URL line already exists, if so abort. if line_number == len(file_lines) - 1 or file_lines[line_number + 1] != new_line: From 02c158cabe7d16d037c368e125df25047714746f Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Sat, 26 Oct 2024 03:30:48 +0000 Subject: [PATCH 11/20] refactor: abstract diff method within client There's no reason to have main directly construct the URL. 
A different VCS (such as GitLab) could have a different URL construction. Further, in the case of LocalClient, there is no URL. --- GitHubClient.py | 20 ++++++++++++++++++-- LocalClient.py | 25 ++++++++++++++++++++++++- main.py | 21 +++++---------------- 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/GitHubClient.py b/GitHubClient.py index f38db91..15db432 100644 --- a/GitHubClient.py +++ b/GitHubClient.py @@ -17,7 +17,7 @@ class GitHubClient(object): self.before = os.getenv('INPUT_BEFORE') self.sha = os.getenv('INPUT_SHA') self.commits = json.loads(os.getenv('INPUT_COMMITS')) or [] - self.diff_url = os.getenv('INPUT_DIFF_URL') + self.__init_diff_url__() self.token = os.getenv('INPUT_TOKEN') self.issues_url = f'{self.repos_url}{self.repo}/issues' self.milestones_url = f'{self.repos_url}{self.repo}/milestones' @@ -45,6 +45,20 @@ class GitHubClient(object): # Populate milestones so we can perform a lookup if one is specified. self._get_milestones() + def __init_diff_url__(self): + manual_commit_ref = os.getenv('MANUAL_COMMIT_REF') + manual_base_ref = os.getenv('MANUAL_BASE_REF') + if manual_commit_ref: + self.sha = manual_commit_ref + if manual_commit_ref and manual_base_ref: + print(f'Manually comparing {manual_base_ref}...{manual_commit_ref}') + self.diff_url = f'{self.repos_url}{self.repo}/compare/{manual_base_ref}...{manual_commit_ref}' + elif manual_commit_ref: + print(f'Manual checking {manual_commit_ref}') + self.diff_url = f'{self.repos_url}{self.repo}/commits/{manual_commit_ref}' + else: + self.diff_url = os.getenv('INPUT_DIFF_URL') + def get_last_diff(self): """Get the last diff.""" if self.diff_url: @@ -56,10 +70,12 @@ class GitHubClient(object): elif len(self.commits) == 1: # There is only one commit. diff_url = f'{self.repos_url}{self.repo}/commits/{self.sha}' - else: + elif len(self.commits) > 1: # There are several commits: compare with the oldest one. 
oldest = sorted(self.commits, key=self._get_timestamp)[0]['id'] diff_url = f'{self.repos_url}{self.repo}/compare/{oldest}...{self.sha}' + else: + return None diff_headers = { 'Accept': 'application/vnd.github.v3.diff', diff --git a/LocalClient.py b/LocalClient.py index 9d0b606..b15a495 100644 --- a/LocalClient.py +++ b/LocalClient.py @@ -1,13 +1,36 @@ import subprocess +import os class LocalClient(object): def __init__(self): self.diff_url = None self.commits = ['placeholder'] # content doesn't matter, just length self.insert_issue_urls = False + self.__set_diff_refs__() + + def __set_diff_refs__(self): + # set the target of the comparison to user-specified value, if + # provided, falling back to HEAD + manual_commit_ref = os.getenv('MANUAL_COMMIT_REF') + if manual_commit_ref: + self.sha = manual_commit_ref + else: + self.sha = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() + # set the source of the comparison to user-specified value, if + # provided, falling back to commit immediately before the target + manual_base_ref = os.getenv('MANUAL_BASE_REF') + if manual_base_ref: + self.base_ref = manual_base_ref + else: + self.base_ref = subprocess.run(['git', 'rev-parse', f'{self.sha}^'], stdout=subprocess.PIPE).stdout.decode('utf-8').strip() + # print feedback to the user + if manual_commit_ref and manual_base_ref: + print(f'Manually comparing {manual_base_ref}...{manual_commit_ref}') + elif manual_commit_ref: + print(f'Manual checking {manual_commit_ref}') def get_last_diff(self): - return subprocess.run(['git', 'diff', 'HEAD^..HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8') + return subprocess.run(['git', 'diff', f'{self.base_ref}..{self.sha}'], stdout=subprocess.PIPE).stdout.decode('latin-1') def create_issue(self, issue): return [201, None] diff --git a/main.py b/main.py index a16f753..63552d0 100644 --- a/main.py +++ b/main.py @@ -25,23 +25,12 @@ if __name__ == "__main__": # if needed, fall back to using
a local client for testing client = client or LocalClient() - # Check to see if the workflow has been run manually. - # If so, adjust the client SHA and diff URL to use the manually supplied inputs. - manual_commit_ref = os.getenv('MANUAL_COMMIT_REF') - manual_base_ref = os.getenv('MANUAL_BASE_REF') - if manual_commit_ref: - client.sha = manual_commit_ref - if manual_commit_ref and manual_base_ref: - print(f'Manually comparing {manual_base_ref}...{manual_commit_ref}') - client.diff_url = f'{client.repos_url}{client.repo}/compare/{manual_base_ref}...{manual_commit_ref}' - elif manual_commit_ref: - print(f'Manual checking {manual_commit_ref}') - client.diff_url = f'{client.repos_url}{client.repo}/commits/{manual_commit_ref}' - if client.diff_url or len(client.commits) != 0: - # Get the diff from the last pushed commit. - last_diff = StringIO(client.get_last_diff()) + # Get the diff from the last pushed commit. + last_diff = client.get_last_diff() + + if last_diff: # Parse the diff for TODOs and create an Issue object for each. - raw_issues = TodoParser().parse(last_diff) + raw_issues = TodoParser().parse(StringIO(last_diff)) # This is a simple, non-perfect check to filter out any TODOs that have just been moved. # It looks for items that appear in the diff as both an addition and deletion. # It is based on the assumption that TODOs will not have identical titles in identical files. From 1fa135d86b39ad1c27d9a0ba89e8c9131a4bcb31 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Sat, 26 Oct 2024 20:27:12 +0000 Subject: [PATCH 12/20] feat: provide additional info about issue being processed --- main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.py b/main.py index 63552d0..f02fe32 100644 --- a/main.py +++ b/main.py @@ -75,7 +75,7 @@ if __name__ == "__main__": # Cycle through the Issue objects and create or close a corresponding GitHub issue for each. 
for j, raw_issue in enumerate(issues_to_process): - print(f'Processing issue {j + 1} of {len(issues_to_process)}') + print(f"Processing issue {j + 1} of {len(issues_to_process)}: '{raw_issue.title}' @ {raw_issue.file_name}:{raw_issue.start_line}") if raw_issue.status == LineStatus.ADDED: status_code, new_issue_number = client.create_issue(raw_issue) if status_code == 201: From a4edef676c576872c03e4ad7d2ba993991e4173e Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Sat, 26 Oct 2024 21:32:10 +0000 Subject: [PATCH 13/20] feat: add get_issue_url() This is really just a dummy operation so that main can unconditionally make the call on any client --- LocalClient.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/LocalClient.py b/LocalClient.py index b15a495..adbb912 100644 --- a/LocalClient.py +++ b/LocalClient.py @@ -37,3 +37,6 @@ class LocalClient(object): def close_issue(self, issue): return 200 + + def get_issue_url(self, new_issue_number): + return "N/A" From 21f86f529e0e4794f66585d4e8b71a7acac03790 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Sat, 26 Oct 2024 22:16:11 +0000 Subject: [PATCH 14/20] feat: provide more info about issue create/update --- main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main.py b/main.py index f02fe32..0462e3e 100644 --- a/main.py +++ b/main.py @@ -79,7 +79,7 @@ if __name__ == "__main__": if raw_issue.status == LineStatus.ADDED: status_code, new_issue_number = client.create_issue(raw_issue) if status_code == 201: - print('Issue created') + print(f'Issue created: : #{new_issue_number} @ {client.get_issue_url(new_issue_number)}') # Check to see if we should insert the issue URL back into the linked TODO. # Don't insert URLs for comments. Comments do not get updated. 
if client.insert_issue_urls and not (raw_issue.ref and raw_issue.ref.startswith('#')): @@ -98,7 +98,7 @@ if __name__ == "__main__": with open(raw_issue.file_name, 'w') as issue_file: issue_file.writelines(file_lines) elif status_code == 200: - print('Issue updated') + print(f'Issue updated: : #{new_issue_number} @ {client.get_issue_url(new_issue_number)}') else: print('Issue could not be created') elif raw_issue.status == LineStatus.DELETED and os.getenv('INPUT_CLOSE_ISSUES', 'true') == 'true': From bd01636b00d4229741ce7240b153be9edbba7a26 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Mon, 28 Oct 2024 15:21:38 +0000 Subject: [PATCH 15/20] refactor: move insert_issue_urls logic to main --- GitHubClient.py | 1 - main.py | 6 ++++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/GitHubClient.py b/GitHubClient.py index 15db432..0c56049 100644 --- a/GitHubClient.py +++ b/GitHubClient.py @@ -34,7 +34,6 @@ class GitHubClient(object): self.line_break = '\n\n' if auto_p else '\n' self.auto_assign = os.getenv('INPUT_AUTO_ASSIGN', 'false') == 'true' self.actor = os.getenv('INPUT_ACTOR') - self.insert_issue_urls = os.getenv('INPUT_INSERT_ISSUE_URLS', 'false') == 'true' if self.base_url == 'https://api.github.com/': self.line_base_url = 'https://github.com/' else: diff --git a/main.py b/main.py index 0462e3e..676a5cb 100644 --- a/main.py +++ b/main.py @@ -73,6 +73,9 @@ if __name__ == "__main__": issues_to_process = [issue for issue in issues_to_process if not (issue.issue_url in update_and_close_issues and issue.status == LineStatus.DELETED)] + # Check to see if we should insert the issue URL back into the linked TODO. + insert_issue_urls = os.getenv('INPUT_INSERT_ISSUE_URLS', 'false') == 'true' + # Cycle through the Issue objects and create or close a corresponding GitHub issue for each. 
for j, raw_issue in enumerate(issues_to_process): print(f"Processing issue {j + 1} of {len(issues_to_process)}: '{raw_issue.title}' @ {raw_issue.file_name}:{raw_issue.start_line}") @@ -80,9 +83,8 @@ if __name__ == "__main__": status_code, new_issue_number = client.create_issue(raw_issue) if status_code == 201: print(f'Issue created: : #{new_issue_number} @ {client.get_issue_url(new_issue_number)}') - # Check to see if we should insert the issue URL back into the linked TODO. # Don't insert URLs for comments. Comments do not get updated. - if client.insert_issue_urls and not (raw_issue.ref and raw_issue.ref.startswith('#')): + if insert_issue_urls and not (raw_issue.ref and raw_issue.ref.startswith('#')): line_number = raw_issue.start_line - 1 with open(raw_issue.file_name, 'r') as issue_file: file_lines = issue_file.readlines() From eefdda59f9431015beaeb21b0bd8cc3a3d81bf34 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Sat, 26 Oct 2024 03:31:26 +0000 Subject: [PATCH 16/20] chore: add devcontainer --- .devcontainer/devcontainer.json | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..ce3acb4 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,22 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. 
+ // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "pip3 install --user -r requirements.txt" + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. + // "remoteUser": "root" +} From 7435acbd70f20e49bb2ec8203789aa1e98d269a2 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Fri, 25 Oct 2024 15:56:44 +0000 Subject: [PATCH 17/20] feat: add additional Dockerfile target Add additional, non-default, Dockerfile target which installs app into an ubuntu image. This is useful for local development and enables other operations (such as committing back to the repo) to occur within the container. --- Dockerfile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Dockerfile b/Dockerfile index 5ab27cf..184cec7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,6 +6,13 @@ RUN pip install --target=/app requests RUN pip install --target=/app -U pip setuptools wheel RUN pip install --target=/app ruamel.yaml +FROM ubuntu AS ubuntu-runtime +RUN apt update -y && apt install -y python3 git +COPY --from=builder /app /app +WORKDIR /app +ENV PYTHONPATH /app +CMD ["python3", "/app/main.py"] + FROM gcr.io/distroless/python3-debian12 COPY --from=builder /app /app WORKDIR /app From ef72f61bedab1f9241768f686ac8d8afc141e95d Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Mon, 28 Oct 2024 17:59:15 +0000 Subject: [PATCH 18/20] refactor: address mypy type warning --- main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/main.py b/main.py index 676a5cb..f73f9b1 100644 --- a/main.py +++ b/main.py @@ -36,9 +36,9 @@ if __name__ == "__main__": # It is based on the assumption that TODOs will not have identical titles in identical files. # That is about as good as we can do for TODOs without issue URLs. 
issues_to_process = [] - for values, similar_issues in itertools.groupby(raw_issues, key=operator.attrgetter('title', 'file_name', + for values, similar_issues_iter in itertools.groupby(raw_issues, key=operator.attrgetter('title', 'file_name', 'markdown_language')): - similar_issues = list(similar_issues) + similar_issues = list(similar_issues_iter) if (len(similar_issues) == 2 and all(issue.issue_url is None for issue in similar_issues) and ((similar_issues[0].status == LineStatus.ADDED and similar_issues[1].status == LineStatus.DELETED) From f1f17d83720da0ce5e7b03f4ab0c016ebad1705e Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Mon, 28 Oct 2024 18:01:56 +0000 Subject: [PATCH 19/20] refactor: add common base class to GitHubClient and LocalClient Enables adding type checking to minimize mypy errors --- Client.py | 12 ++++++++++++ GitHubClient.py | 3 ++- LocalClient.py | 12 ++---------- main.py | 4 +++- 4 files changed, 19 insertions(+), 12 deletions(-) create mode 100644 Client.py diff --git a/Client.py b/Client.py new file mode 100644 index 0000000..5d9fdf6 --- /dev/null +++ b/Client.py @@ -0,0 +1,12 @@ +class Client(object): + def get_last_diff(self): + return None + + def create_issue(self, issue): + return [201, None] + + def close_issue(self, issue): + return 200 + + def get_issue_url(self, new_issue_number): + return "N/A" diff --git a/GitHubClient.py b/GitHubClient.py index 0c56049..4e3dc28 100644 --- a/GitHubClient.py +++ b/GitHubClient.py @@ -1,8 +1,9 @@ import os import requests import json +from Client import Client -class GitHubClient(object): +class GitHubClient(Client): """Basic client for getting the last diff and managing issues.""" existing_issues = [] milestones = [] diff --git a/LocalClient.py b/LocalClient.py index adbb912..4c8df17 100644 --- a/LocalClient.py +++ b/LocalClient.py @@ -1,7 +1,8 @@ import subprocess import os +from Client import Client -class LocalClient(object): +class 
LocalClient(Client): def __init__(self): self.diff_url = None self.commits = ['placeholder'] # content doesn't matter, just length @@ -31,12 +32,3 @@ class LocalClient(object): def get_last_diff(self): return subprocess.run(['git', 'diff', f'{self.base_ref}..{self.sha}'], stdout=subprocess.PIPE).stdout.decode('latin-1') - - def create_issue(self, issue): - return [201, None] - - def close_issue(self, issue): - return 200 - - def get_issue_url(self, new_issue_number): - return "N/A" diff --git a/main.py b/main.py index f73f9b1..049dc57 100644 --- a/main.py +++ b/main.py @@ -10,18 +10,20 @@ import operator from collections import defaultdict from TodoParser import TodoParser from LineStatus import LineStatus +from Client import Client from LocalClient import LocalClient from GitHubClient import GitHubClient if __name__ == "__main__": + client: Client | None = None # Try to create a basic client for communicating with the remote version control server, automatically initialised with environment variables. try: # try to build a GitHub client client = GitHubClient() except EnvironmentError: # don't immediately give up - client = None + pass # if needed, fall back to using a local client for testing client = client or LocalClient() From 649e50253b421d08e72ba29100d7afc0efeb0559 Mon Sep 17 00:00:00 2001 From: Robert Alonso <17463757+rgalonso@users.noreply.github.com> Date: Mon, 28 Oct 2024 18:28:52 +0000 Subject: [PATCH 20/20] test: add unittest to run mypy New unittest file consists of two tests. Both run mypy, but one does so with arguments that are expected to allow the test to pass. The other does not provide those arguments and is expected to fail. The expected failure is intentional, to serve as an ongoing reminder to try to 1) move towards having strict typing in the codebase and to 2) not add any additional errors/warnings in the meantime. Separated requirements into runtime (requirements.txt) and additional development (requirements-dev.txt). 
Modified devcontainer and workflow to reflect the change. --- .devcontainer/devcontainer.json | 2 +- .github/workflows/ci.yml | 2 +- requirements-dev.txt | 4 ++++ requirements.txt | 3 +-- tests/test_syntax.py | 39 +++++++++++++++++++++++++++++++++ 5 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 requirements-dev.txt create mode 100644 tests/test_syntax.py diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index ce3acb4..9ab1cda 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -12,7 +12,7 @@ // "forwardPorts": [], // Use 'postCreateCommand' to run commands after the container is created. - "postCreateCommand": "pip3 install --user -r requirements.txt" + "postCreateCommand": "pip3 install --user -r requirements.txt -r requirements-dev.txt" // Configure tool-specific properties. // "customizations": {}, diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 53d1400..412ba54 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,6 +13,6 @@ jobs: - name: "Install test dependencies" run: | python -m pip install --upgrade pip - pip install -r requirements.txt + pip install -r requirements.txt -r requirements-dev.txt - name: "Run tests" run: python -m pytest \ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000..8db956a --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,4 @@ +pytest==8.3.3 +mypy==1.13.0 +mypy-extensions==1.0.0 +types-requests==2.32.0.20241016 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 30ce159..af01943 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,3 +1,2 @@ requests==2.32.3 -ruamel.yaml==0.18.6 -pytest==8.3.3 \ No newline at end of file +ruamel.yaml==0.18.6 \ No newline at end of file diff --git a/tests/test_syntax.py b/tests/test_syntax.py new file mode 100644 index 0000000..76ae99f --- /dev/null +++ 
b/tests/test_syntax.py @@ -0,0 +1,39 @@ +# based on https://gist.github.com/bbarker/4ddf4a1c58ae8465f3d37b6f2234a421 + +import os +import subprocess +import sys +import unittest +from typing import List + + +class MyPyTest(unittest.TestCase): + + def __call_mypy__(self, args, files): + result: int = subprocess.call(self.base_mypy_call + args + files, env=os.environ, cwd=self.pypath) + self.assertEqual(result, 0, '') + + def test_run_mypy_app(self): + mypy_args: List[str] = [ + "--disable-error-code", "var-annotated" + ] + self.__call_mypy__(mypy_args, ["main.py"]) + + # Run test again, but without disabling any error codes. + # This is expected to fail, but we intentionally keep this test around to + # 1) try not to add any more errors to what's already in the baseline + # 2) as a reminder to try to move the codebase towards having type checking eventually + @unittest.expectedFailure + def test_run_strict_mypy_app(self): + mypy_args: List[str] = [] + self.__call_mypy__(mypy_args, ["main.py"]) + + def __init__(self, *args, **kwargs) -> None: + super(MyPyTest, self).__init__(*args, **kwargs) + my_env = os.environ.copy() + self.pypath: str = my_env.get("PYTHONPATH", os.getcwd()) + self.base_mypy_call: List[str] = [sys.executable, "-m", "mypy", "--ignore-missing-imports"] + + +if __name__ == '__main__': + unittest.main()