challenge.py
import numpy as np
from typing import List, Optional

from pydantic import BaseModel, Field

from webgenie.challenges.challenge_types import (
    ACCURACY_COMPETITION_TYPE,
    QUALITY_COMPETITION_TYPE,
    SEO_COMPETITION_TYPE,
    BALANCED_COMPETITION_TYPE,
)
from webgenie.tasks.metric_types import (
    ACCURACY_METRIC_NAME,
    QUALITY_METRIC_NAME,
    SEO_METRIC_NAME,
)
from webgenie.tasks.task import Task
from webgenie.tasks.solution import Solution

class Challenge(BaseModel):
    """Base class for a scoring challenge over one task and its submitted solutions."""

    task: Optional[Task] = Field(default=None, description="The task to be solved")
    solutions: List[Solution] = Field(default_factory=list, description="The solutions to the task")
    competition_type: str = Field(default="", description="The type of competition")
    session: int = Field(default=0, description="The session number")

    async def calculate_scores(self) -> tuple[np.ndarray, dict[str, np.ndarray]]:
        """Return (aggregated_scores, per_metric_scores); implemented by subclasses."""
        raise NotImplementedError


class AccuracyChallenge(Challenge):
    competition_type: str = Field(default=ACCURACY_COMPETITION_TYPE, description="The type of competition")

    async def calculate_scores(self) -> tuple[np.ndarray, dict[str, np.ndarray]]:
        scores = await self.task.generator.calculate_scores(self.task, self.solutions)
        # Weighted blend: accuracy dominates, with a small quality component.
        aggregated_scores = scores[ACCURACY_METRIC_NAME] * 0.9 + scores[QUALITY_METRIC_NAME] * 0.1
        return aggregated_scores, scores


class SeoChallenge(Challenge):
    competition_type: str = Field(default=SEO_COMPETITION_TYPE, description="The type of competition")

    async def calculate_scores(self) -> tuple[np.ndarray, dict[str, np.ndarray]]:
        scores = await self.task.generator.calculate_scores(self.task, self.solutions)
        accuracy_scores = scores[ACCURACY_METRIC_NAME]
        seo_scores = scores[SEO_METRIC_NAME]
        # The SEO score only counts when accuracy clears the 0.9 threshold; otherwise 0.
        aggregated_scores = np.where(accuracy_scores > 0.9, seo_scores, 0)
        return aggregated_scores, scores


class QualityChallenge(Challenge):
    competition_type: str = Field(default=QUALITY_COMPETITION_TYPE, description="The type of competition")

    async def calculate_scores(self) -> tuple[np.ndarray, dict[str, np.ndarray]]:
        scores = await self.task.generator.calculate_scores(self.task, self.solutions)
        accuracy_scores = scores[ACCURACY_METRIC_NAME]
        quality_scores = scores[QUALITY_METRIC_NAME]
        # The quality score only counts when accuracy clears the 0.9 threshold; otherwise 0.
        aggregated_scores = np.where(accuracy_scores > 0.9, quality_scores, 0)
        return aggregated_scores, scores


class BalancedChallenge(Challenge):
    competition_type: str = Field(default=BALANCED_COMPETITION_TYPE, description="The type of competition")

    async def calculate_scores(self) -> tuple[np.ndarray, dict[str, np.ndarray]]:
        scores = await self.task.generator.calculate_scores(self.task, self.solutions)
        accuracy_scores = scores[ACCURACY_METRIC_NAME]
        quality_scores = scores[QUALITY_METRIC_NAME]
        seo_scores = scores[SEO_METRIC_NAME]
        # Fixed 60/20/20 blend of accuracy, quality, and SEO.
        aggregated_scores = accuracy_scores * 0.6 + quality_scores * 0.2 + seo_scores * 0.2
        return aggregated_scores, scores


# Relative weight reserved for each competition type (accuracy dominates).
RESERVED_WEIGHTS = {
    ACCURACY_COMPETITION_TYPE: 70,
    BALANCED_COMPETITION_TYPE: 10,
    SEO_COMPETITION_TYPE: 10,
    QUALITY_COMPETITION_TYPE: 10,
}
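
For reference, a minimal sketch of the accuracy gating used by SeoChallenge and QualityChallenge, using only numpy. The arrays below are made-up illustrations standing in for the per-metric scores that task.generator.calculate_scores would return; they are not part of the file above.

    import numpy as np

    # Hypothetical per-solution metric scores (illustrative values only).
    accuracy_scores = np.array([0.95, 0.80, 0.92])
    seo_scores = np.array([0.70, 0.90, 0.50])

    # Solutions below the 0.9 accuracy threshold are zeroed out before ranking.
    aggregated = np.where(accuracy_scores > 0.9, seo_scores, 0)
    print(aggregated)  # [0.7 0.  0.5]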