
Commit 451f2e1

removed some comments
1 parent 82503a6 commit 451f2e1

File tree: 1 file changed (+6 −32 lines)

sqlserver/datadog_checks/sqlserver/schemas.py

Lines changed: 6 additions & 32 deletions
@@ -117,9 +117,8 @@ def fetch_schema_data(cursor, db_name):
         pdb.set_trace()
         print(self.schemas_per_db)
 
-        #per DB per sqhema per tables.
         # TODO how often ?
-        # TODO put in a class
+
         #TODOTODO do we need this map/list format if we are not dumping in json ??? May be we need to send query results as they are ?
 
         #TODO Looks fine similar to Postgres, do we need to do someting with prinicipal_id
@@ -138,36 +137,22 @@ def _query_schema_information(self, cursor):
         self._log.debug("fetched schemas len(rows)=%s", len(schemas))
         return schemas
 
-    #TODO we need to take care of new DB / removed DB
-    #def get_current_db_times(cursor):
-        # list of all known DBs
-
-    #def execute_time_query():
-        # self._last_time_collected_diff_per_db =
-
-
-
-    #TODO will nedd a separate query for changed indexes
+    #TODO collect diffs : we need to take care of new DB / removed DB . schemas new removed
+    # will nedd a separate query for changed indexes
 
-
-    # def payload consume , push in data amount
     def _get_table_data(self, table, schema, cursor):
-        #while processing tables we would like to stop after X amount of data in payload.
         table["columns"] = self._get_columns_data_per_table(table["name"], schema["name"], cursor)
         table["partitions"] = self._get_partitions_data_per_table(table["object_id"], cursor)
         if str(table["object_id"]) == "1803153469":
             pdb.set_trace()
             print("should have index")
         table["indexes"] = self._get_index_data_per_table(table["object_id"], cursor)
         table["foreign_keys"] = self._get_foreign_key_data_per_table(table["object_id"], cursor)
-        return False
-
-
+        #TODO probably here decide based on the columns amount
+        return True
+
     #TODO in SQLServer partitioned child tables should have the same object_id might be worth checking with a test.
 
-
-    # TODO how often ?
-    # TODO put in a class
     #TODOTODO do we need this map/list format if we are not dumping in json ??? May be we need to send query results as they are ?
     def _get_tables(self, schema, cursor):
         cursor.execute(TABLES_IN_SCHEMA_QUERY.format(schema["schema_id"]))
@@ -176,17 +161,6 @@ def _get_tables(self, schema, cursor):
         # rows = [dict(zip(columns + ["columns", "indexes", "partitions", "foreign_keys"], row + [[], [], [], []])) for row in cursor.fetchall()] #TODO may be this works
         return [ {"object_id" : row["object_id"], "name" : row['name'], "columns" : [], "indexes" : [], "partitions" : [], "foreign_keys" : []} for row in rows ]
 
-    # TODO how often ?
-    # TODO put in a class
-    #TODOTODO do we need this map/list format if we are not dumping in json ??? May be we need to send query results as they are ?
-
-
-    # TODO modify_date - there is a modify date !!!
-    # TODO what is principal_id
-    # TODO is_replicated - might be interesting ?
-
-
-
     def _get_columns_data_per_table(self, table_name, schema_name, cursor):
         return execute_query_output_result_as_a_dict(COLUMN_QUERY.format(table_name, schema_name), cursor)
 

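Note: the flipped return value (False → True) together with the new "decide based on the columns amount" TODO and the removed comment "stop after X amount of data in payload" suggest _get_table_data is meant to tell its caller whether the current payload still has room. Below is a minimal sketch of such a cutoff, assuming a simple column-count budget; MAX_COLUMNS_PER_PAYLOAD, table_fits, and collect_schema_payload are hypothetical names, not part of this commit.

MAX_COLUMNS_PER_PAYLOAD = 100_000  # hypothetical budget, not from the commit


def table_fits(table, columns_so_far):
    """True while the current payload still has room for this table's columns."""
    return columns_so_far + len(table["columns"]) <= MAX_COLUMNS_PER_PAYLOAD


def collect_schema_payload(tables):
    """Accumulate tables until the column budget is exhausted."""
    payload, columns_so_far = [], 0
    for table in tables:
        if not table_fits(table, columns_so_far):
            break  # a real check would flush here and start a new payload
        payload.append(table)
        columns_so_far += len(table["columns"])
    return payload


# Tables shaped like the dicts _get_tables builds in the diff above.
demo = [
    {"object_id": i, "name": "t%d" % i, "columns": [None] * 60_000,
     "indexes": [], "partitions": [], "foreign_keys": []}
    for i in range(3)
]
print(len(collect_schema_payload(demo)))  # -> 1 with the budget above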
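_get_columns_data_per_table delegates to execute_query_output_result_as_a_dict, which the diff does not show. A plausible reading, assuming it follows the standard DB-API pattern of zipping column names from cursor.description with each fetched row (which would also explain why _get_tables can index rows as row["name"]):

def execute_query_output_result_as_a_dict(query, cursor):
    """Run the query and return each row as a {column_name: value} dict."""
    cursor.execute(query)
    columns = [column[0] for column in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]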