Mirror of https://github.com/ditkrg/todo-to-issue-action.git (synced 2026-01-22 22:06:43 +00:00)

Commit 81eaa8f8e3 "Tidy code" (parent 6d4eeffd5f)

main.py (119 lines changed):
@@ -45,7 +45,7 @@ class Issue(object):
 
 
 class GitHubClient(object):
-    """Basic client for getting the last diff and creating/closing issues."""
+    """Basic client for getting the last diff and managing issues."""
     existing_issues = []
     milestones = []
 
@@ -72,9 +72,6 @@ class GitHubClient(object):
         }
         auto_p = os.getenv('INPUT_AUTO_P', 'true') == 'true'
         self.line_break = '\n\n' if auto_p else '\n'
-        # Retrieve the existing repo issues now so we can easily check them later.
-        self._get_existing_issues()
-        self._get_milestones()
         self.auto_assign = os.getenv('INPUT_AUTO_ASSIGN', 'false') == 'true'
         self.actor = os.getenv('INPUT_ACTOR')
         self.insert_issue_urls = os.getenv('INPUT_INSERT_ISSUE_URLS', 'false') == 'true'
@@ -83,25 +80,25 @@ class GitHubClient(object):
         else:
             self.line_base_url = self.base_url
         self.project = os.getenv('INPUT_PROJECT', None)
-
-    # noinspection PyMethodMayBeStatic
-    def get_timestamp(self, commit):
-        return commit.get('timestamp')
+        # Retrieve the existing repo issues now so we can easily check them later.
+        self._get_existing_issues()
+        # Populate milestones so we can perform a lookup if one is specified.
+        self._get_milestones()
 
     def get_last_diff(self):
         """Get the last diff."""
         if self.diff_url:
-            # Diff url was directly passed in config, likely due to this being a PR
+            # Diff url was directly passed in config, likely due to this being a PR.
             diff_url = self.diff_url
         elif self.before != '0000000000000000000000000000000000000000':
-            # There is a valid before SHA to compare with, or this is a release being created
+            # There is a valid before SHA to compare with, or this is a release being created.
             diff_url = f'{self.repos_url}{self.repo}/compare/{self.before}...{self.sha}'
         elif len(self.commits) == 1:
-            # There is only one commit
+            # There is only one commit.
             diff_url = f'{self.repos_url}{self.repo}/commits/{self.sha}'
         else:
-            # There are several commits: compare with the oldest one
-            oldest = sorted(self.commits, key=self.get_timestamp)[0]['id']
+            # There are several commits: compare with the oldest one.
+            oldest = sorted(self.commits, key=self._get_timestamp)[0]['id']
            diff_url = f'{self.repos_url}{self.repo}/compare/{oldest}...{self.sha}'
 
         diff_headers = {
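Note: the renamed _get_timestamp helper only serves as a sort key when a push carries several commits. A minimal standalone sketch of that selection, using invented commit data and a placeholder repository, is shown below; it is illustrative and not part of the commit.

# Minimal sketch with invented data: pick the oldest commit of a push by its
# ISO-8601 timestamp (same-offset strings sort chronologically as plain text)
# and build a compare URL from it. OWNER/REPO and the SHAs are placeholders.
def _get_timestamp(commit):
    """Get a commit timestamp."""
    return commit.get('timestamp')

commits = [
    {'id': 'def456', 'timestamp': '2024-05-02T10:00:00+00:00'},
    {'id': 'abc123', 'timestamp': '2024-05-01T09:30:00+00:00'},
]
sha = 'fff999'

oldest = sorted(commits, key=_get_timestamp)[0]['id']
diff_url = f'https://api.github.com/repos/OWNER/REPO/compare/{oldest}...{sha}'
print(diff_url)  # .../compare/abc123...fff999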
@@ -114,6 +111,11 @@ class GitHubClient(object):
             return diff_request.text
         raise Exception('Could not retrieve diff. Operation will abort.')
 
+    # noinspection PyMethodMayBeStatic
+    def _get_timestamp(self, commit):
+        """Get a commit timestamp."""
+        return commit.get('timestamp')
+
     def _get_milestones(self, page=1):
         """Get all the milestones."""
         params = {
@@ -129,7 +131,7 @@ class GitHubClient(object):
             self._get_milestones(page + 1)
 
     def _get_milestone(self, title):
-        """Get the milestone number for this title (creating one if it doesn't exist)."""
+        """Get the milestone number for the one with this title (creating one if it doesn't exist)."""
         for m in self.milestones:
             if m['title'] == title:
                 return m['number']
@@ -149,8 +151,7 @@ class GitHubClient(object):
         params = {
             'per_page': 100,
             'page': page,
-            'state': 'open',
-            'labels': 'todo'
+            'state': 'open'
         }
         list_issues_request = requests.get(self.issues_url, headers=self.issue_headers, params=params)
         if list_issues_request.status_code == 200:
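Note: the hunk above drops the 'todo' label filter, so all open issues are now fetched. An illustrative, standalone sketch of the same paginated listing pattern follows; the repository path and token are placeholders, not values from the action.

# Illustrative sketch only: paginated listing of open issues via the GitHub
# REST API with requests. OWNER/REPO and GITHUB_TOKEN are placeholders.
import requests

issues_url = 'https://api.github.com/repos/OWNER/REPO/issues'
issue_headers = {'Authorization': 'token GITHUB_TOKEN', 'Accept': 'application/vnd.github+json'}

existing_issues = []
page = 1
while True:
    params = {'per_page': 100, 'page': page, 'state': 'open'}
    response = requests.get(issues_url, headers=issue_headers, params=params)
    if response.status_code != 200:
        break
    batch = response.json()
    existing_issues.extend(batch)
    if len(batch) < 100:
        break  # Fewer than a full page means this was the last page.
    page += 1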
@@ -195,10 +196,12 @@ class GitHubClient(object):
         variables = {
             'owner': owner,
         }
-        response = requests.post('https://api.github.com/graphql', json={'query': query, 'variables': variables},
-                                 headers=self.graphql_headers)
-        if response.status_code == 200:
-            projects = response.json().get('data', {}).get(project_type, {}).get('projectsV2', {}).get('nodes', [])
+        project_request = requests.post('https://api.github.com/graphql',
+                                        json={'query': query, 'variables': variables},
+                                        headers=self.graphql_headers)
+        if project_request.status_code == 200:
+            projects = (project_request.json().get('data', {}).get(project_type, {}).get('projectsV2', {})
+                        .get('nodes', []))
             for project in projects:
                 if project['title'] == project_name:
                     return project['id']
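Note: the renamed project_request variables above all follow the same requests.post GraphQL pattern. A self-contained sketch of that pattern is below; the query, owner and token are illustrative assumptions (an organization-owned project is assumed), not taken from the action.

# Illustrative sketch only: query ProjectsV2 over the GitHub GraphQL API with
# requests. OWNER and GITHUB_TOKEN are placeholders.
import requests

query = '''
query($owner: String!) {
  organization(login: $owner) {
    projectsV2(first: 20) { nodes { id title } }
  }
}
'''
variables = {'owner': 'OWNER'}
graphql_headers = {'Authorization': 'Bearer GITHUB_TOKEN'}

project_request = requests.post('https://api.github.com/graphql',
                                json={'query': query, 'variables': variables},
                                headers=graphql_headers)
if project_request.status_code == 200:
    nodes = (project_request.json().get('data', {}).get('organization', {})
             .get('projectsV2', {}).get('nodes', []))
    for node in nodes:
        print(node['id'], node['title'])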
@@ -220,11 +223,11 @@ class GitHubClient(object):
             'repo': repo,
             'issue_number': issue_number
         }
-        response = requests.post('https://api.github.com/graphql', json={'query': query, 'variables': variables},
-                                 headers=self.graphql_headers)
-        if response.status_code == 200:
-            return response.json()['data']['repository']['issue']['id']
+        project_request = requests.post('https://api.github.com/graphql',
+                                        json={'query': query, 'variables': variables},
+                                        headers=self.graphql_headers)
+        if project_request.status_code == 200:
+            return project_request.json()['data']['repository']['issue']['id']
         return None
 
     def _add_issue_to_project(self, issue_id, project_id):
@@ -242,9 +245,10 @@ class GitHubClient(object):
             "projectId": project_id,
             "contentId": issue_id
         }
-        response = requests.post('https://api.github.com/graphql',json={'query': mutation, 'variables': variables},
-                                 headers=self.graphql_headers)
-        return response.status_code
+        project_request = requests.post('https://api.github.com/graphql',
+                                        json={'query': mutation, 'variables': variables},
+                                        headers=self.graphql_headers)
+        return project_request.status_code
 
     def _comment_issue(self, issue_number, comment):
         """Post a comment on an issue."""
@@ -296,8 +300,7 @@ class GitHubClient(object):
                 # Ref = issue number (indicating this is a comment on that issue).
                 issue_number = issue.ref.lstrip('#')
                 if issue_number.isdigit():
-                    # Create the comment now and skip the rest.
-                    # Not a new issue so doesn't return a number.
+                    # Create the comment now.
                     return self._comment_issue(issue_number, f'{issue.title}\n\n{issue_contents}'), None
                 else:
                     # Just prepend the ref to the title.
@@ -333,7 +336,7 @@ class GitHubClient(object):
         request_status = issue_request.status_code
         issue_number = issue_request.json()['number'] if request_status in [200, 201] else None
 
-        # Check if issue should be added to project now it exists.
+        # Check if issue should be added to a project now it exists.
         if issue_number and self.project:
             project_id = self._get_project_id(self.project)
             if project_id:
||||||
@ -365,14 +368,14 @@ class GitHubClient(object):
|
|||||||
update_issue_url = f'{self.issues_url}/{issue_number}'
|
update_issue_url = f'{self.issues_url}/{issue_number}'
|
||||||
body = {'state': 'closed'}
|
body = {'state': 'closed'}
|
||||||
requests.patch(update_issue_url, headers=self.issue_headers, json=body)
|
requests.patch(update_issue_url, headers=self.issue_headers, json=body)
|
||||||
req = self._comment_issue(issue_number, f'Closed in {self.sha}')
|
request_status = self._comment_issue(issue_number, f'Closed in {self.sha}.')
|
||||||
|
|
||||||
# Update the description if this is a PR.
|
# Update the description if this is a PR.
|
||||||
if os.getenv('GITHUB_EVENT_NAME') == 'pull_request':
|
if os.getenv('GITHUB_EVENT_NAME') == 'pull_request':
|
||||||
pr_number = os.getenv('PR_NUMBER')
|
pr_number = os.getenv('PR_NUMBER')
|
||||||
if pr_number:
|
if pr_number:
|
||||||
req = self._update_pr_body(pr_number, body)
|
request_status = self._update_pr_body(pr_number, body)
|
||||||
return req
|
return request_status
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _update_pr_body(self, pr_number, issue_number):
|
def _update_pr_body(self, pr_number, issue_number):
|
||||||
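Note: closing an issue combines a PATCH to the issue with a follow-up comment, as the hunk above shows. A minimal standalone sketch of those two REST calls follows; the repository, token, issue number and SHA are placeholders.

# Illustrative sketch only: close an issue and comment on it via the REST API.
# OWNER/REPO, GITHUB_TOKEN, the issue number and the SHA are placeholders.
import requests

issues_url = 'https://api.github.com/repos/OWNER/REPO/issues'
issue_headers = {'Authorization': 'token GITHUB_TOKEN', 'Accept': 'application/vnd.github+json'}
issue_number = 42
sha = 'abcdef1'

update_issue_url = f'{issues_url}/{issue_number}'
requests.patch(update_issue_url, headers=issue_headers, json={'state': 'closed'})

comment_url = f'{issues_url}/{issue_number}/comments'
comment_request = requests.post(comment_url, headers=issue_headers,
                                json={'body': f'Closed in {sha}.'})
print(comment_request.status_code)  # 201 when the comment is created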
@@ -408,7 +411,7 @@ class TodoParser(object):
     ISSUE_NUMBER_PATTERN = re.compile(r'/issues/(\d+)', re.IGNORECASE)
 
     def __init__(self):
-        # Determine if the Issues should be escaped.
+        # Determine if the issues should be escaped.
         self.should_escape = os.getenv('INPUT_ESCAPE', 'true') == 'true'
         # Load any custom identifiers, otherwise use the default.
         custom_identifiers = os.getenv('INPUT_IDENTIFIERS')
@@ -426,8 +429,7 @@ class TodoParser(object):
             print('Invalid identifiers dict, ignoring.')
 
         self.languages_dict = None
-
-        # Check if the standard collections should be loaded
+        # Check if the standard collections should be loaded.
         if os.getenv('INPUT_NO_STANDARD', 'false') != 'true':
             # Load the languages data for ascertaining file types.
             languages_url = 'https://raw.githubusercontent.com/github/linguist/master/lib/linguist/languages.yml'
@@ -452,28 +454,28 @@ class TodoParser(object):
 
         custom_languages = os.getenv('INPUT_LANGUAGES', '')
         if custom_languages != '':
-            # Load all custom languages
+            # Load all custom languages.
             for path in custom_languages.split(','):
                 # noinspection PyBroadException
                 try:
-                    # Decide if the path is a url or local file
+                    # Decide if the path is a url or local file.
                     if path.startswith('http'):
                         languages_request = requests.get(path)
                         if languages_request.status_code != 200:
-                            print('Cannot retrieve custom language file. (\''+path+'\')')
+                            print(f'Cannot retrieve custom language file "{path}".')
                             continue
                         data = languages_request.json()
                     else:
                         path = os.path.join(os.getcwd(), path)
                         if not os.path.exists(path) or not os.path.isfile(path):
-                            print('Cannot retrieve custom language file. (\''+path+'\')')
+                            print(f'Cannot retrieve custom language file "{path}".')
                             continue
                         f = open(path)
                         data = json.load(f)
 
-                    # Iterate through the definitions
+                    # Iterate through the definitions.
                     for lang in data:
-                        # Add/Replace the language definition
+                        # Add/replace the language definition.
                         self.languages_dict[lang['language']] = {}
                         self.languages_dict[lang['language']]['type'] = ''
                         self.languages_dict[lang['language']]['color'] = ''
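Note: to make the shape the loop above expects concrete, here is a minimal sketch of a custom language definition and how it is read back. The file name and marker values are assumptions for illustration, not taken from the repository's documentation.

# Illustrative sketch only: a minimal custom language definition with the
# 'language' and 'markers' keys the loader reads. Values are invented.
import json

example = [
    {
        'language': 'MyLang',
        'markers': [
            {'type': 'line', 'pattern': '//'},
            {'type': 'block', 'pattern': {'start': '/*', 'end': '*/'}},
        ],
    }
]

with open('custom_languages.json', 'w') as f:
    json.dump(example, f)

with open('custom_languages.json') as f:
    data = json.load(f)

for lang in data:
    print(lang['language'], [m['type'] for m in lang['markers']])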
@@ -482,7 +484,7 @@ class TodoParser(object):
                         self.languages_dict[lang['language']]['ace_mode'] = 'text'
                         self.languages_dict[lang['language']]['language_id'] = 0
 
-                        # Check if a syntax with the language name already exists
+                        # Check if comment syntax for the language name already exists.
                         counter = 0
                         exists = False
                         for syntax in self.syntax_dict:
@@ -493,18 +495,18 @@ class TodoParser(object):
                                 counter = counter + 1
 
                         if exists:
-                            # When the syntax exists it will be popped out of the list
+                            # When the syntax exists it will be popped out of the list.
                             self.syntax_dict.pop(counter)
 
-                        # And be replaced with the new syntax definition
+                        # And be replaced with the new syntax definition.
                         self.syntax_dict.append({
                             'language': lang['language'],
                             'markers': lang['markers']
                         })
                 except Exception:
-                    print('An error occurred in the custom language file (\''+path+'\')')
+                    print(f'An error occurred in the custom language file "{path}".')
                     print('Please check the file, or if it represents undefined behavior, '
-                          'create an issue at \'https://github.com/alstr/todo-to-issue-action/issues\'')
+                          'create an issue at https://github.com/alstr/todo-to-issue-action/issues.')
 
     # noinspection PyTypeChecker
     def parse(self, diff_file):
@@ -542,7 +544,7 @@ class TodoParser(object):
                 continue
             curr_markers, curr_markdown_language = self._get_file_details(curr_file)
             if not curr_markers or not curr_markdown_language:
-                print(f'Could not check {curr_file} for TODOs as this language is not yet supported by default.')
+                print(f'Could not check "{curr_file}" for TODOs as this language is not yet supported by default.')
                 continue
 
             # Break this section down into individual changed code blocks.
@@ -587,7 +589,7 @@ class TodoParser(object):
             for marker in block['markers']:
                 # Check if there are line or block comments.
                 if marker['type'] == 'line':
-                    # Add a negative lookup include the second character from alternative comment patterns
+                    # Add a negative lookup to include the second character from alternative comment patterns.
                     # This step is essential to handle cases like in Julia, where '#' and '#=' are comment patterns.
                     # It ensures that when a space after the comment is optional ('\s' => '\s*'),
                     # the second character would be matched because of the any character expression ('.+').
@@ -601,7 +603,7 @@ class TodoParser(object):
                             suff_escape_list.append(self._extract_character(to_escape['pattern'], 1))
                         else:
                             # Block comments and line comments cannot have the same comment pattern,
-                            # so a check if the string is the same is unnecessary
+                            # so a check if the string is the same is unnecessary.
                             if to_escape['pattern']['start'][0] == marker['pattern'][0]:
                                 suff_escape_list.append(self._extract_character(to_escape['pattern']['start'], 1))
                             search = to_escape['pattern']['end'].find(marker['pattern'])
@@ -656,8 +658,7 @@ class TodoParser(object):
         return issues
 
     def _get_language_details(self, language_name, attribute, value):
-        """Try and get the Markdown language and comment syntax
-        data based on a specified attribute of the language."""
+        """Try and get the Markdown language and comment syntax data based on a specified attribute of the language."""
         attributes = [at.lower() for at in self.languages_dict[language_name][attribute]]
         if value.lower() in attributes:
             for syntax_details in self.syntax_dict:
||||||
@ -669,7 +670,7 @@ class TodoParser(object):
|
|||||||
"""Try and get the Markdown language and comment syntax data for the given file."""
|
"""Try and get the Markdown language and comment syntax data for the given file."""
|
||||||
file_name, extension = os.path.splitext(os.path.basename(file))
|
file_name, extension = os.path.splitext(os.path.basename(file))
|
||||||
for language_name in self.languages_dict:
|
for language_name in self.languages_dict:
|
||||||
if extension != "" and 'extensions' in self.languages_dict[language_name]:
|
if extension != '' and 'extensions' in self.languages_dict[language_name]:
|
||||||
syntax_details, ace_mode = self._get_language_details(language_name, 'extensions', extension)
|
syntax_details, ace_mode = self._get_language_details(language_name, 'extensions', extension)
|
||||||
if syntax_details is not None and ace_mode is not None:
|
if syntax_details is not None and ace_mode is not None:
|
||||||
return syntax_details, ace_mode
|
return syntax_details, ace_mode
|
||||||
@ -700,7 +701,7 @@ class TodoParser(object):
|
|||||||
found_issues.append(curr_issue)
|
found_issues.append(curr_issue)
|
||||||
curr_issue = Issue(
|
curr_issue = Issue(
|
||||||
title=line_title,
|
title=line_title,
|
||||||
labels=['todo'],
|
labels=[],
|
||||||
assignees=[],
|
assignees=[],
|
||||||
milestone=None,
|
milestone=None,
|
||||||
body=[],
|
body=[],
|
||||||
@@ -728,7 +729,7 @@ class TodoParser(object):
                     start_line += 1
 
             elif curr_issue:
-                # Extract other issue information that may exist.
+                # Extract other issue information that may exist below the title.
                 line_labels = self._get_labels(cleaned_line)
                 line_assignees = self._get_assignees(cleaned_line)
                 line_milestone = self._get_milestone(cleaned_line)
@@ -845,13 +846,13 @@ class TodoParser(object):
         title_identifier = None
         for identifier in self.identifiers:
             title_identifier = identifier
-            title_pattern = re.compile(r'(?<=' + identifier + r'[\s:]).+', re.IGNORECASE)
+            title_pattern = re.compile(fr'(?<={identifier}[\s:]).+', re.IGNORECASE)
             title_search = title_pattern.search(comment, re.IGNORECASE)
             if title_search:
                 title = title_search.group(0).strip(': ')
                 break
             else:
-                title_ref_pattern = re.compile(r'(?<=' + identifier + r'\().+', re.IGNORECASE)
+                title_ref_pattern = re.compile(fr'(?<={identifier}\().+', re.IGNORECASE)
                 title_ref_search = title_ref_pattern.search(comment, re.IGNORECASE)
                 if title_ref_search:
                     title = title_ref_search.group(0).strip()
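Note: the two patterns above only change from string concatenation to f-strings; their behaviour is unchanged. A small sketch of what the lookbehind patterns match, using invented sample comments, is shown here for reference.

# Illustrative sketch only: the lookbehind patterns applied to invented
# sample comments.
import re

identifier = 'TODO'
title_pattern = re.compile(fr'(?<={identifier}[\s:]).+', re.IGNORECASE)
title_ref_pattern = re.compile(fr'(?<={identifier}\().+', re.IGNORECASE)

plain = '# TODO: Tidy up the client code'
match = title_pattern.search(plain)
if match:
    print(match.group(0).strip(': '))  # Tidy up the client code

referenced = '# TODO(#123): Follow up on the refactor'
ref_match = title_ref_pattern.search(referenced)
if ref_match:
    print(ref_match.group(0).strip())  # #123): Follow up on the refactor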
@@ -896,8 +897,6 @@ class TodoParser(object):
         milestone = None
         if milestone_search:
             milestone = milestone_search.group(0)
-            if milestone.isdigit():
-                milestone = int(milestone)
         return milestone
 
     # noinspection PyMethodMayBeStatic