Skip to content

Commit a8091a1

Browse files
vertex-sdk-bot authored and copybara-github committed
feat: Add metric_resource_name to evaluation types and methods
PiperOrigin-RevId: 877145985
1 parent 429a182 commit a8091a1

2 files changed

Lines changed: 48 additions & 0 deletions

File tree

vertexai/_genai/evals.py

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -309,6 +309,12 @@ def _EvaluationRunMetric_from_vertex(
309309
["metric_config"],
310310
_UnifiedMetric_from_vertex(getv(from_object, ["metricConfig"]), to_object),
311311
)
312+
if getv(from_object, ["metricResourceName"]) is not None:
313+
setv(
314+
to_object,
315+
["metric_resource_name"],
316+
getv(from_object, ["metricResourceName"]),
317+
)
312318

313319
return to_object
314320

@@ -327,6 +333,12 @@ def _EvaluationRunMetric_to_vertex(
327333
["metricConfig"],
328334
_UnifiedMetric_to_vertex(getv(from_object, ["metric_config"]), to_object),
329335
)
336+
if getv(from_object, ["metric_resource_name"]) is not None:
337+
setv(
338+
to_object,
339+
["metricResourceName"],
340+
getv(from_object, ["metric_resource_name"]),
341+
)
330342

331343
return to_object
332344

@@ -415,6 +427,12 @@ def _GenerateInstanceRubricsRequest_to_vertex(
415427
getv(from_object, ["rubric_generation_spec"]), to_object
416428
),
417429
)
430+
if getv(from_object, ["metric_resource_name"]) is not None:
431+
setv(
432+
to_object,
433+
["metricResourceName"],
434+
getv(from_object, ["metric_resource_name"]),
435+
)
418436

419437
if getv(from_object, ["config"]) is not None:
420438
setv(to_object, ["config"], getv(from_object, ["config"]))
@@ -574,6 +592,12 @@ def _RubricGenerationSpec_to_vertex(
574592
["rubricTypeOntology"],
575593
getv(from_object, ["rubric_type_ontology"]),
576594
)
595+
if getv(from_object, ["metric_resource_name"]) is not None:
596+
setv(
597+
to_object,
598+
["metricResourceName"],
599+
getv(from_object, ["metric_resource_name"]),
600+
)
577601

578602
return to_object
579603

@@ -949,6 +973,7 @@ def _generate_rubrics(
949973
self,
950974
*,
951975
contents: list[genai_types.ContentOrDict],
976+
metric_resource_name: Optional[str] = None,
952977
predefined_rubric_generation_spec: Optional[
953978
types.PredefinedMetricSpecOrDict
954979
] = None,
@@ -963,6 +988,7 @@ def _generate_rubrics(
963988
contents=contents,
964989
predefined_rubric_generation_spec=predefined_rubric_generation_spec,
965990
rubric_generation_spec=rubric_generation_spec,
991+
metric_resource_name=metric_resource_name,
966992
config=config,
967993
)
968994

@@ -1463,6 +1489,7 @@ def generate_rubrics(
14631489
generator_model_config: Optional["genai_types.AutoraterConfigOrDict"] = None,
14641490
rubric_content_type: Optional["types.RubricContentType"] = None,
14651491
rubric_type_ontology: Optional[list[str]] = None,
1492+
metric_resource_name: Optional[str] = None,
14661493
predefined_spec_name: Optional[Union[str, "types.PrebuiltMetric"]] = None,
14671494
metric_spec_parameters: Optional[dict[str, Any]] = None,
14681495
config: Optional[types.RubricGenerationConfigOrDict] = None,
@@ -1498,6 +1525,7 @@ def generate_rubrics(
14981525
generated. Only used if `prompt_template` is provided.
14991526
rubric_type_ontology: Optional. A pre-defined list of allowed types
15001527
for generated rubrics. Only used if `prompt_template` is provided.
1528+
metric_resource_name: Optional. The resource name of the metric definition.
15011529
predefined_spec_name: Optional. The name of a Predefined Metric to use
15021530
for rubric generation (e.g., "general_quality_v1") or a types.PrebuiltMetric object.
15031531
Mutually exclusive with `prompt_template` and its related parameters.
@@ -1601,6 +1629,8 @@ def generate_rubrics(
16011629
}
16021630
spec_dict = {k: v for k, v in spec_dict.items() if v is not None}
16031631
rubric_gen_spec = types.RubricGenerationSpec.model_validate(spec_dict)
1632+
elif metric_resource_name:
1633+
pass
16041634
else:
16051635
raise ValueError(
16061636
"Either predefined_spec_name or prompt_template must be provided."
@@ -1626,6 +1656,7 @@ def generate_rubrics(
16261656
contents=contents,
16271657
rubric_generation_spec=rubric_gen_spec,
16281658
predefined_rubric_generation_spec=predefined_spec,
1659+
metric_resource_name=metric_resource_name,
16291660
config=config,
16301661
)
16311662
rubric_group = {rubric_group_name: response.generated_rubrics}

vertexai/_genai/types/common.py

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2261,6 +2261,10 @@ class RubricGenerationSpec(_common.BaseModel):
22612261
If this field is provided, it implies `include_rubric_type` should be true,
22622262
and the generated rubric types should be chosen from this ontology.""",
22632263
)
2264+
metric_resource_name: Optional[str] = Field(
2265+
default=None,
2266+
description="""The resource name of the metric definition.""",
2267+
)
22642268

22652269

22662270
class RubricGenerationSpecDict(TypedDict, total=False):
@@ -2283,6 +2287,8 @@ class RubricGenerationSpecDict(TypedDict, total=False):
22832287
If this field is provided, it implies `include_rubric_type` should be true,
22842288
and the generated rubric types should be chosen from this ontology."""
22852289

2290+
metric_resource_name: Optional[str]
2291+
22862292

22872293
RubricGenerationSpecOrDict = Union[RubricGenerationSpec, RubricGenerationSpecDict]
22882294

@@ -2469,6 +2475,10 @@ class EvaluationRunMetric(_common.BaseModel):
24692475
metric_config: Optional[UnifiedMetric] = Field(
24702476
default=None, description="""The unified metric used for evaluation run."""
24712477
)
2478+
metric_resource_name: Optional[str] = Field(
2479+
default=None,
2480+
description="""The resource name of the metric definition.""",
2481+
)
24722482

24732483

24742484
class EvaluationRunMetricDict(TypedDict, total=False):
@@ -2480,6 +2490,8 @@ class EvaluationRunMetricDict(TypedDict, total=False):
24802490
metric_config: Optional[UnifiedMetricDict]
24812491
"""The unified metric used for evaluation run."""
24822492

2493+
metric_resource_name: Optional[str]
2494+
24832495

24842496
EvaluationRunMetricOrDict = Union[EvaluationRunMetric, EvaluationRunMetricDict]
24852497

@@ -5249,6 +5261,10 @@ class _GenerateInstanceRubricsRequest(_common.BaseModel):
52495261
default=None,
52505262
description="""Specification for how the rubrics should be generated.""",
52515263
)
5264+
metric_resource_name: Optional[str] = Field(
5265+
default=None,
5266+
description="""The resource name of the metric definition.""",
5267+
)
52525268
config: Optional[RubricGenerationConfig] = Field(default=None, description="""""")
52535269

52545270

@@ -5269,6 +5285,7 @@ class _GenerateInstanceRubricsRequestDict(TypedDict, total=False):
52695285
rubric_generation_spec: Optional[RubricGenerationSpecDict]
52705286
"""Specification for how the rubrics should be generated."""
52715287

5288+
metric_resource_name: Optional[str]
52725289
config: Optional[RubricGenerationConfigDict]
52735290
""""""
52745291

0 commit comments

Comments (0)