diff --git a/challenge_config.yaml b/challenge_config.yaml
index ede451a..31cb429 100755
--- a/challenge_config.yaml
+++ b/challenge_config.yaml
@@ -1,35 +1,83 @@
# If you are not sure what all these fields mean, please refer our documentation here:
# https://evalai.readthedocs.io/en/latest/configuration.html
-title: Random Number Generator Challenge
-short_description: Random number generation challenge for each submission
+title: Object Instance Detection @ ACCV2024
+short_description: Instance Detection
description: templates/description.html
evaluation_details: templates/evaluation_details.html
terms_and_conditions: templates/terms_and_conditions.html
image: logo.jpg
submission_guidelines: templates/submission_guidelines.html
-leaderboard_description: Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras egestas a libero nec sagittis.
+leaderboard_description: View the real-world instance detection leaderboard here. For all metrics, a higher number means better performance. Methods are ranked by the primary metric, AP.
evaluation_script: evaluation_script.zip
remote_evaluation: False
is_docker_based: False
-start_date: 2019-01-01 00:00:00
-end_date: 2099-05-31 23:59:59
+start_date: 2024-08-10 00:00:00
+end_date: 2099-10-25 23:59:59
published: True
leaderboard:
- id: 1
schema:
{
- "labels": ["Metric1", "Metric2", "Metric3", "Total"],
- "default_order_by": "Total",
+ "labels": ["AP", "AP50", "AP75", "AP_easy", "AP_hard", "AP_small", "AP_medium", "AP_large", "AR_1", "AR_10", "AR_100", "AR_small", "AR_medium", "AR_large"],
+ "default_order_by": "AP",
"metadata": {
- "Metric1": {
- "sort_ascending": True,
- "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
+ "AP": {
+ "sort_ascending": False,
+        "description": "AP averages the precision over all instances at IoU thresholds from 0.5 to 0.95 with a step size of 0.05.",
+ },
+ "AP50": {
+ "sort_ascending": False,
+        "description": "AP50 is the precision averaged over all instances at an IoU threshold of 0.5.",
+ },
+ "AP75": {
+ "sort_ascending": False,
+        "description": "AP75 is the precision averaged over all instances at an IoU threshold of 0.75.",
+ },
+ "AP_easy": {
+ "sort_ascending": False,
+ "description": "AP_easy is the AP on easy scene images.",
+ },
+ "AP_hard": {
+ "sort_ascending": False,
+        "description": "AP_hard is the AP on hard scene images.",
+ },
+ "AP_small": {
+ "sort_ascending": False,
+        "description": "AP_small is the AP on small object instances.",
+ },
+ "AP_medium": {
+ "sort_ascending": False,
+ "description": "AP_medium is the AP on medium object instances.",
+ },
+ "AP_large": {
+ "sort_ascending": False,
+        "description": "AP_large is the AP on large object instances.",
+ },
+ "AR_1": {
+ "sort_ascending": False,
+        "description": "AR_1 averages the proposal recall at IoU thresholds from 0.5 to 1.0 with a step size of 0.05, given 1 detection per image.",
+ },
+ "AR_10": {
+ "sort_ascending": False,
+        "description": "AR_10 averages the proposal recall at IoU thresholds from 0.5 to 1.0 with a step size of 0.05, given 10 detections per image.",
+ },
+ "AR_100": {
+ "sort_ascending": False,
+        "description": "AR_100 averages the proposal recall at IoU thresholds from 0.5 to 1.0 with a step size of 0.05, given 100 detections per image.",
+ },
+ "AR_small": {
+ "sort_ascending": False,
+        "description": "AR_small averages the proposal recall at IoU thresholds from 0.5 to 1.0 with a step size of 0.05, for small instances.",
+ },
+ "AR_medium": {
+ "sort_ascending": False,
+        "description": "AR_medium averages the proposal recall at IoU thresholds from 0.5 to 1.0 with a step size of 0.05, for medium instances.",
+ },
+ "AR_large": {
+ "sort_ascending": False,
+        "description": "AR_large averages the proposal recall at IoU thresholds from 0.5 to 1.0 with a step size of 0.05, for large instances.",
},
- "Metric2": {
- "sort_ascending": True,
- "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
- }
}
}
@@ -40,8 +88,8 @@ challenge_phases:
leaderboard_public: False
is_public: True
is_submission_public: True
- start_date: 2019-01-19 00:00:00
- end_date: 2099-04-25 23:59:59
+ start_date: 2024-08-10 00:00:00
+ end_date: 2099-10-25 23:59:59
test_annotation_file: annotations/test_annotations_devsplit.json
codename: dev
max_submissions_per_day: 5
@@ -57,22 +105,25 @@ challenge_phases:
- name: publication_url
is_visible: True
submission_meta_attributes:
- - name: TextAttribute
- description: Sample
+      - name: Did you use any pretrained models, e.g., vision-language models (CLIP), vision models (DINOv2), etc.?
+ description: Yes or No? If Yes, what models?
type: text
- required: False
- - name: SingleOptionAttribute
- description: Sample
- type: radio
- options: ["A", "B", "C"]
- - name: MultipleChoiceAttribute
- description: Sample
- type: checkbox
- options: ["alpha", "beta", "gamma"]
- - name: TrueFalseField
- description: Sample
+ required: True
+ - name: Did you use additional training data, e.g., any data collected by yourself?
+ description: Yes or No? If Yes, what data?
+ type: text
+      - name: What type of GPUs (e.g., Nvidia 3080) did you use, and how many?
+ description: None
+ type: text
+ required: True
+ - name: If you are invited to present your work at our workshop, do you agree to submit a report and open-source your code for our verification?
+ description: Yes or No?
type: boolean
required: True
+ - name: Any remarks?
+ description: Please let us know any other info you want to share!
+ type: text
+ required: False
is_restricted_to_select_one_submission: False
is_partial_submission_evaluation_enabled: False
allowed_submission_file_types: ".json, .zip, .txt, .tsv, .gz, .csv, .h5, .npy, .npz"
@@ -82,8 +133,8 @@ challenge_phases:
leaderboard_public: True
is_public: True
is_submission_public: True
- start_date: 2019-01-01 00:00:00
- end_date: 2099-05-24 23:59:59
+ start_date: 2024-10-10 00:00:00
+ end_date: 2099-11-25 23:59:59
test_annotation_file: annotations/test_annotations_testsplit.json
codename: test
max_submissions_per_day: 5
@@ -99,27 +150,32 @@ challenge_phases:
- name: publication_url
is_visible: True
submission_meta_attributes:
- - name: TextAttribute
- description: Sample
+      - name: Did you use any pretrained models, e.g., vision-language models (CLIP), vision models (DINOv2), etc.?
+ description: Yes or No? If Yes, what models?
+ type: text
+ required: True
+ - name: Did you use additional training data, e.g., any data collected by yourself?
+ description: Yes or No? If Yes, what data?
+ type: text
+      - name: What type of GPUs (e.g., Nvidia 3080) did you use, and how many?
+ description: None
type: text
- - name: SingleOptionAttribute
- description: Sample
- type: radio
- options: ["A", "B", "C"]
- - name: MultipleChoiceAttribute
- description: Sample
- type: checkbox
- options: ["alpha", "beta", "gamma"]
- - name: TrueFalseField
- description: Sample
+ required: True
+ - name: If you are invited to present your work at our workshop, do you agree to submit a report and open-source your code for our verification?
+ description: Yes or No?
type: boolean
+ required: True
+ - name: Any remarks?
+ description: Please let us know any other info you want to share!
+ type: text
+ required: False
is_restricted_to_select_one_submission: False
is_partial_submission_evaluation_enabled: False
dataset_splits:
- id: 1
- name: Train Split
- codename: train_split
+ name: Dev Split
+ codename: val_split
- id: 2
name: Test Split
codename: test_split
@@ -128,18 +184,18 @@ challenge_phase_splits:
- challenge_phase_id: 1
leaderboard_id: 1
dataset_split_id: 1
- visibility: 1
- leaderboard_decimal_precision: 2
+ visibility: 3
+ leaderboard_decimal_precision: 3
is_leaderboard_order_descending: True
- challenge_phase_id: 2
leaderboard_id: 1
dataset_split_id: 1
- visibility: 3
+ visibility: 1
leaderboard_decimal_precision: 2
is_leaderboard_order_descending: True
- challenge_phase_id: 2
leaderboard_id: 1
dataset_split_id: 2
- visibility: 1
- leaderboard_decimal_precision: 2
+ visibility: 3
+ leaderboard_decimal_precision: 3
is_leaderboard_order_descending: True
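
Note for reviewers: the fourteen leaderboard labels above follow COCO-style detection metrics. The sketch below shows how the twelve standard ones are conventionally computed with pycocotools and mapped onto the schema labels. This is an illustration only: the actual logic ships inside evaluation_script.zip (not part of this diff), the function name and file paths are hypothetical, and AP_easy/AP_hard are challenge-specific per-scene splits that stock COCOeval does not emit.

```python
# A minimal sketch, assuming the bundled evaluation script performs standard
# COCO-style bbox evaluation with pycocotools. The evaluate() helper and file
# paths are illustrative, not the challenge's actual code.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# COCOeval.stats is a fixed-order 12-vector for iouType="bbox".
STAT_LABELS = ["AP", "AP50", "AP75", "AP_small", "AP_medium", "AP_large",
               "AR_1", "AR_10", "AR_100", "AR_small", "AR_medium", "AR_large"]

def evaluate(ann_file: str, result_file: str) -> dict:
    coco_gt = COCO(ann_file)                # e.g., annotations/test_annotations_testsplit.json
    coco_dt = coco_gt.loadRes(result_file)  # submitted detections in COCO result format
    ev = COCOeval(coco_gt, coco_dt, iouType="bbox")
    # Note: stock COCOeval averages AR over IoU 0.5:0.95; matching the
    # 0.5:1.0 range described above would require changing ev.params.iouThrs.
    ev.evaluate()
    ev.accumulate()
    ev.summarize()
    # Three decimals matches leaderboard_decimal_precision: 3 on public splits.
    return {k: round(float(v), 3) for k, v in zip(STAT_LABELS, ev.stats)}
```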