Coverage for src/qdrant_loader/connectors/jira/cloud_connector.py: 94%

51 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-04-10 09:40 +0000

1"""Jira connector implementation.""" 

2 

3from collections.abc import AsyncGenerator 

4from datetime import datetime 

5from urllib.parse import urlparse # noqa: F401 - may be used in URL handling 

6 

7from requests.auth import HTTPBasicAuth # noqa: F401 - compatibility 

8 

9from qdrant_loader.connectors.jira.connector import BaseJiraConnector 

10from qdrant_loader.connectors.jira.models import ( 

11 JiraIssue, 

12) 

13from qdrant_loader.utils.logging import LoggingConfig 

14 

15logger = LoggingConfig.get_logger(__name__) 

16 

17 

class JiraCloudConnector(BaseJiraConnector):
    """Jira cloud connector for fetching and processing issues."""

    # Jira Cloud exposes REST API version 3.
    CLOUD_JIRA_VERSION = "3"
    # Token-paginated JQL search endpoint used by get_issues.
    SEARCH_ENDPOINT = "search/jql"

    def _get_api_url(self, endpoint: str) -> str:
        """Construct the full API URL for an endpoint.

        Args:
            endpoint: API endpoint path, relative (no leading slash).

        Returns:
            str: Full API URL.
        """
        return f"{self.base_url}/rest/api/{self.CLOUD_JIRA_VERSION}/{endpoint}"

    async def get_issues(
        self, updated_after: datetime | None = None
    ) -> AsyncGenerator[JiraIssue, None]:
        """
        Get all issues from Jira.

        Pages through the Cloud token-based search API (``nextPageToken`` /
        ``isLast``). Issues that fail to parse are logged and skipped so a
        single malformed issue does not abort the whole retrieval.

        Args:
            updated_after: Optional datetime to filter issues updated after this time

        Yields:
            JiraIssue objects

        Raises:
            Exception: Re-raises any error from the underlying page request.
        """
        next_page_token: str | None = None
        processed_count = 0
        page_size = self.config.page_size
        attempted_count = 0
        # Log progress every 100 issues instead of every 50
        progress_log_interval = 100

        # The JQL filter depends only on updated_after, not on pagination
        # state, so build it once instead of once per page fetch.
        jql = self._build_jql_filter(updated_after)

        logger.info(
            "🎫 Starting JIRA issue retrieval",
            project_key=self.config.project_key,
            page_size=page_size,
            updated_after=updated_after.isoformat() if updated_after else None,
        )

        while True:
            params = {
                "jql": jql,
                "maxResults": page_size,
                "expand": "changelog",
                "fields": "*all",
            }

            if next_page_token:
                params["nextPageToken"] = next_page_token

            logger.debug(
                "Fetching JIRA issues page",
                next_page_token=next_page_token,
                page_size=page_size,
                jql=jql,
            )

            try:
                response = await self._make_request(
                    "GET", self.SEARCH_ENDPOINT, params=params
                )
            except Exception as e:
                logger.error(
                    "Failed to fetch JIRA issues page",
                    next_page_token=next_page_token,
                    page_size=page_size,
                    error=str(e),
                    error_type=type(e).__name__,
                )
                raise

            # An empty page (or missing "issues" key) ends pagination.
            if not response or not response.get("issues"):
                logger.debug(
                    "No more JIRA issues found, stopping pagination",
                    next_page_token=next_page_token,
                    total_processed=processed_count,
                )
                break

            issues = response["issues"]

            for issue in issues:
                try:
                    parsed_issue = self._parse_issue(issue)
                    yield parsed_issue
                    processed_count += 1

                    if processed_count % progress_log_interval == 0:
                        logger.info(
                            f"🎫 Processed {processed_count} JIRA issues so far"
                        )

                except Exception as e:
                    logger.error(
                        "Failed to parse JIRA issue",
                        issue_id=issue.get("id"),
                        issue_key=issue.get("key"),
                        error=str(e),
                        error_type=type(e).__name__,
                    )
                    # Continue processing other issues instead of failing completely
                    continue

            attempted_count += len(issues)
            # Check next page token
            next_page_token = response.get("nextPageToken")
            is_last = response.get("isLast")
            if is_last or not next_page_token:
                logger.info(
                    f"✅ Completed JIRA issue retrieval: "
                    f"{attempted_count} issues attempted, "
                    f"{processed_count} successfully processed"
                )
                break