Skip to content

Commit 038e9c7

Browse files
committed
test: add test
1 parent 42ed5f3 commit 038e9c7

File tree

1 file changed: +116 additions, −116 deletions

test/testcase/test_sync/test_sync_retrieval.py

Lines changed: 116 additions & 116 deletions
Original file line numberDiff line numberDiff line change
@@ -137,62 +137,62 @@ class TestRecord:
137137
upload_file_dict.update({"file": open(filepath, "rb")})
138138
upload_file_data_list.append(upload_file_dict)
139139

140-
# @pytest.mark.run(order=31)
141-
# @pytest.mark.parametrize("text_splitter", text_splitter_list)
142-
# def test_create_record_by_text(self, collection_id, text_splitter):
143-
# # Create a text record.
144-
# text = "Machine learning is a subfield of artificial intelligence (AI) that involves the development of algorithms that allow computers to learn from and make decisions or predictions based on data."
145-
# create_record_data = {
146-
# "type": "text",
147-
# "title": "Machine learning",
148-
# "collection_id": collection_id,
149-
# "content": text,
150-
# "text_splitter": text_splitter,
151-
# "metadata": {"key1": "value1", "key2": "value2"},
152-
# }
153-
# res = create_record(**create_record_data)
154-
# res_dict = vars(res)
155-
# assume_record_result(create_record_data, res_dict)
156-
#
157-
# @pytest.mark.run(order=31)
158-
# def test_create_record_by_web(self, collection_id):
159-
# # Create a web record.
160-
# text_splitter = TokenTextSplitter(chunk_size=200, chunk_overlap=20)
161-
# create_record_data = {
162-
# "type": "web",
163-
# "title": "TaskingAI",
164-
# "collection_id": collection_id,
165-
# "url": "https://docs.tasking.ai/docs/guide/getting_started/overview/",
166-
# "text_splitter": text_splitter,
167-
# "metadata": {"key1": "value1", "key2": "value2"},
168-
# }
169-
#
170-
# res = create_record(**create_record_data)
171-
# res_dict = vars(res)
172-
# assume_record_result(create_record_data, res_dict)
173-
#
174-
# @pytest.mark.run(order=32)
175-
# @pytest.mark.parametrize("upload_file_data", upload_file_data_list[:2])
176-
# def test_create_record_by_file(self, collection_id, upload_file_data):
177-
# # upload file
178-
# upload_file_res = upload_file(**upload_file_data)
179-
# upload_file_dict = vars(upload_file_res)
180-
# file_id = upload_file_dict["file_id"]
181-
# pytest.assume(file_id is not None)
182-
#
183-
# text_splitter = TokenTextSplitter(chunk_size=200, chunk_overlap=20)
184-
# create_record_data = {
185-
# "type": "file",
186-
# "title": "TaskingAI",
187-
# "collection_id": collection_id,
188-
# "file_id": file_id,
189-
# "text_splitter": text_splitter,
190-
# "metadata": {"key1": "value1", "key2": "value2"},
191-
# }
192-
#
193-
# res = create_record(**create_record_data)
194-
# res_dict = vars(res)
195-
# assume_record_result(create_record_data, res_dict)
140+
@pytest.mark.run(order=31)
141+
@pytest.mark.parametrize("text_splitter", text_splitter_list)
142+
def test_create_record_by_text(self, collection_id, text_splitter):
143+
# Create a text record.
144+
text = "Machine learning is a subfield of artificial intelligence (AI) that involves the development of algorithms that allow computers to learn from and make decisions or predictions based on data."
145+
create_record_data = {
146+
"type": "text",
147+
"title": "Machine learning",
148+
"collection_id": collection_id,
149+
"content": text,
150+
"text_splitter": text_splitter,
151+
"metadata": {"key1": "value1", "key2": "value2"},
152+
}
153+
res = create_record(**create_record_data)
154+
res_dict = vars(res)
155+
assume_record_result(create_record_data, res_dict)
156+
157+
@pytest.mark.run(order=31)
158+
def test_create_record_by_web(self, collection_id):
159+
# Create a web record.
160+
text_splitter = TokenTextSplitter(chunk_size=200, chunk_overlap=20)
161+
create_record_data = {
162+
"type": "web",
163+
"title": "TaskingAI",
164+
"collection_id": collection_id,
165+
"url": "https://docs.tasking.ai/docs/guide/getting_started/overview/",
166+
"text_splitter": text_splitter,
167+
"metadata": {"key1": "value1", "key2": "value2"},
168+
}
169+
170+
res = create_record(**create_record_data)
171+
res_dict = vars(res)
172+
assume_record_result(create_record_data, res_dict)
173+
174+
@pytest.mark.run(order=32)
175+
@pytest.mark.parametrize("upload_file_data", upload_file_data_list[:2])
176+
def test_create_record_by_file(self, collection_id, upload_file_data):
177+
# upload file
178+
upload_file_res = upload_file(**upload_file_data)
179+
upload_file_dict = vars(upload_file_res)
180+
file_id = upload_file_dict["file_id"]
181+
pytest.assume(file_id is not None)
182+
183+
text_splitter = TokenTextSplitter(chunk_size=200, chunk_overlap=20)
184+
create_record_data = {
185+
"type": "file",
186+
"title": "TaskingAI",
187+
"collection_id": collection_id,
188+
"file_id": file_id,
189+
"text_splitter": text_splitter,
190+
"metadata": {"key1": "value1", "key2": "value2"},
191+
}
192+
193+
res = create_record(**create_record_data)
194+
res_dict = vars(res)
195+
assume_record_result(create_record_data, res_dict)
196196

197197
@pytest.mark.run(order=32)
198198
def test_list_records(self, collection_id):
@@ -231,66 +231,66 @@ def test_get_record(self, collection_id):
231231
pytest.assume(res_dict["record_id"] == record_id)
232232
# pytest.assume(res_dict["status"] == "ready")
233233

234-
@pytest.mark.run(order=34)
235-
@pytest.mark.parametrize("text_splitter", text_splitter_list)
236-
def test_update_record_by_text(self, collection_id, record_id, text_splitter):
237-
# Update a record.
238-
239-
update_record_data = {
240-
"type": "text",
241-
"title": "TaskingAI",
242-
"collection_id": collection_id,
243-
"record_id": record_id,
244-
"content": "TaskingAI is an AI-native application development platform that unifies modules like Model, Retrieval, Assistant, and Tool into one seamless ecosystem, streamlining the creation and deployment of applications for developers.",
245-
"text_splitter": text_splitter,
246-
"metadata": {"test": "test"},
247-
}
248-
res = update_record(**update_record_data)
249-
res_dict = vars(res)
250-
assume_record_result(update_record_data, res_dict)
251-
252-
@pytest.mark.run(order=34)
253-
@pytest.mark.parametrize("text_splitter", text_splitter_list)
254-
def test_update_record_by_web(self, collection_id, record_id, text_splitter):
255-
# Update a record.
256-
257-
update_record_data = {
258-
"type": "web",
259-
"title": "TaskingAI",
260-
"collection_id": collection_id,
261-
"record_id": record_id,
262-
"url": "https://docs.tasking.ai/docs/guide/getting_started/overview/",
263-
"text_splitter": text_splitter,
264-
"metadata": {"test": "test"},
265-
}
266-
res = update_record(**update_record_data)
267-
res_dict = vars(res)
268-
assume_record_result(update_record_data, res_dict)
269-
270-
@pytest.mark.run(order=35)
271-
@pytest.mark.parametrize("upload_file_data", upload_file_data_list[2:3])
272-
def test_update_record_by_file(self, collection_id, record_id, upload_file_data):
273-
# upload file
274-
upload_file_res = upload_file(**upload_file_data)
275-
upload_file_dict = vars(upload_file_res)
276-
file_id = upload_file_dict["file_id"]
277-
pytest.assume(file_id is not None)
278-
279-
# Update a record.
280-
text_splitter = TokenTextSplitter(chunk_size=200, chunk_overlap=20)
281-
282-
update_record_data = {
283-
"type": "file",
284-
"title": "TaskingAI",
285-
"collection_id": collection_id,
286-
"record_id": record_id,
287-
"file_id": file_id,
288-
"text_splitter": text_splitter,
289-
"metadata": {"test": "test"},
290-
}
291-
res = update_record(**update_record_data)
292-
res_dict = vars(res)
293-
assume_record_result(update_record_data, res_dict)
234+
# @pytest.mark.run(order=34)
235+
# @pytest.mark.parametrize("text_splitter", text_splitter_list)
236+
# def test_update_record_by_text(self, collection_id, record_id, text_splitter):
237+
# # Update a record.
238+
#
239+
# update_record_data = {
240+
# "type": "text",
241+
# "title": "TaskingAI",
242+
# "collection_id": collection_id,
243+
# "record_id": record_id,
244+
# "content": "TaskingAI is an AI-native application development platform that unifies modules like Model, Retrieval, Assistant, and Tool into one seamless ecosystem, streamlining the creation and deployment of applications for developers.",
245+
# "text_splitter": text_splitter,
246+
# "metadata": {"test": "test"},
247+
# }
248+
# res = update_record(**update_record_data)
249+
# res_dict = vars(res)
250+
# assume_record_result(update_record_data, res_dict)
251+
#
252+
# @pytest.mark.run(order=34)
253+
# @pytest.mark.parametrize("text_splitter", text_splitter_list)
254+
# def test_update_record_by_web(self, collection_id, record_id, text_splitter):
255+
# # Update a record.
256+
#
257+
# update_record_data = {
258+
# "type": "web",
259+
# "title": "TaskingAI",
260+
# "collection_id": collection_id,
261+
# "record_id": record_id,
262+
# "url": "https://docs.tasking.ai/docs/guide/getting_started/overview/",
263+
# "text_splitter": text_splitter,
264+
# "metadata": {"test": "test"},
265+
# }
266+
# res = update_record(**update_record_data)
267+
# res_dict = vars(res)
268+
# assume_record_result(update_record_data, res_dict)
269+
#
270+
# @pytest.mark.run(order=35)
271+
# @pytest.mark.parametrize("upload_file_data", upload_file_data_list[2:3])
272+
# def test_update_record_by_file(self, collection_id, record_id, upload_file_data):
273+
# # upload file
274+
# upload_file_res = upload_file(**upload_file_data)
275+
# upload_file_dict = vars(upload_file_res)
276+
# file_id = upload_file_dict["file_id"]
277+
# pytest.assume(file_id is not None)
278+
#
279+
# # Update a record.
280+
# text_splitter = TokenTextSplitter(chunk_size=200, chunk_overlap=20)
281+
#
282+
# update_record_data = {
283+
# "type": "file",
284+
# "title": "TaskingAI",
285+
# "collection_id": collection_id,
286+
# "record_id": record_id,
287+
# "file_id": file_id,
288+
# "text_splitter": text_splitter,
289+
# "metadata": {"test": "test"},
290+
# }
291+
# res = update_record(**update_record_data)
292+
# res_dict = vars(res)
293+
# assume_record_result(update_record_data, res_dict)
294294

295295
@pytest.mark.run(order=79)
296296
def test_delete_record(self, collection_id):

0 commit comments

Comments (0)