Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 28 additions & 0 deletions .github/workflows/auto-format.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
name: auto-format
Copy link
Contributor

@michael-groundlight michael-groundlight Oct 25, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In our Makefile, there is a generate target for generating the generated/ code folder from an OpenAPI spec. Let's add black to that target, as it will make the code diffs simpler (versus seeing a big diff, and then seeing it go away in the action). E.g.

generate:
    ...
    poetry run black .

on: pull_request
jobs:
  format:
    # Only run for branches that live in this repository: PRs from forks
    # do not get a token with write access, so the push step would fail.
    if: github.event.pull_request.head.repo.full_name == github.repository
    runs-on: ubuntu-latest
    steps:
      # Check out the PR branch itself (not the merge ref) so the
      # formatting commit lands on the contributor's branch.
      - uses: actions/checkout@v4
        with:
          ref: ${{ github.head_ref }}
      - name: black
        uses: psf/black@stable
        with:
          src: "."
          options: "--verbose"
      - name: Check for modified files
        id: git-check
        # `::set-output` is deprecated and disabled by GitHub; write the
        # step output to $GITHUB_OUTPUT instead.
        run: echo "modified=$(if git diff-index --quiet HEAD --; then echo "false"; else echo "true"; fi)" >> "$GITHUB_OUTPUT"
      - name: Push changes
        if: steps.git-check.outputs.modified == 'true'
        run: |
          # git refuses to commit without both a name and an email configured.
          git config --global user.name 'Auto-format Bot'
          git config --global user.email 'auto-format-bot@users.noreply.github.com'
          git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}
          git commit -am "Automatically reformatting code with black"
          git push

2 changes: 2 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ generate: install ## Generate the SDK from our public openapi spec
-g python \
-o ./generated
poetry run datamodel-codegen --input spec/public-api.yaml --output generated/model.py
poetry run black .


test-local: install ## Run integration tests against an API server running at http://localhost:8000/device-api (needs GROUNDLIGHT_API_TOKEN)
GROUNDLIGHT_TEST_API_ENDPOINT="http://localhost:8000/device-api" poetry run pytest --cov=src test --log-cli-level INFO
Expand Down
70 changes: 25 additions & 45 deletions generated/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,91 +14,71 @@
class ClassificationResult(BaseModel):
    """Result of a binary-classification image query (generated from the OpenAPI spec)."""

    # Model confidence in the predicted label, constrained to [0, 1]; None when unknown.
    confidence: Optional[confloat(ge=0.0, le=1.0)] = Field(
        None,
        description="On a scale of 0 to 1, how confident are we in the predicted label?",
    )
    label: str = Field(..., description="What is the predicted label?")


class DetectorCreationInput(BaseModel):
    """Request payload for creating a new detector (generated from the OpenAPI spec)."""

    name: constr(max_length=200) = Field(..., description="A short, descriptive name for the detector.")
    query: constr(max_length=300) = Field(..., description="A question about the image.")
    group_name: Optional[constr(max_length=100)] = Field(
        None, description="Which group should this detector be part of?"
    )
    confidence_threshold: Optional[confloat(ge=0.0, le=1.0)] = Field(
        0.9,
        description="If the detector's prediction is below this confidence threshold, send the image query for human review.",
    )
    config_name: Optional[constr(max_length=100)] = Field(
        None,
        description="(Advanced usage) If your account has multiple named ML configuration options enabled, you can use this field to specify which one you would like to use.",
    )


class DetectorTypeEnum(Enum):
    """Discriminator value for Detector objects (generated from the OpenAPI spec)."""

    detector = "detector"


class ImageQueryTypeEnum(Enum):
    """Discriminator value for ImageQuery objects (generated from the OpenAPI spec)."""

    image_query = "image_query"


class ResultTypeEnum(Enum):
    """Kind of result attached to an ImageQuery (generated from the OpenAPI spec)."""

    binary_classification = "binary_classification"


class Detector(BaseModel):
    """A detector as returned by the API (generated from the OpenAPI spec)."""

    id: str = Field(..., description="A unique ID for this object.")
    type: DetectorTypeEnum = Field(..., description="The type of this object.")
    created_at: datetime = Field(..., description="When this detector was created.")
    name: constr(max_length=200) = Field(..., description="A short, descriptive name for the detector.")
    query: str = Field(..., description="A question about the image.")
    group_name: str = Field(..., description="Which group should this detector be part of?")
    confidence_threshold: Optional[confloat(ge=0.0, le=1.0)] = Field(
        0.9,
        description="If the detector's prediction is below this confidence threshold, send the image query for human review.",
    )


class ImageQuery(BaseModel):
    """An image query as returned by the API (generated from the OpenAPI spec)."""

    id: str = Field(..., description="A unique ID for this object.")
    type: ImageQueryTypeEnum = Field(..., description="The type of this object.")
    created_at: datetime = Field(..., description="When was this detector created?")
    query: str = Field(..., description="A question about the image.")
    detector_id: str = Field(..., description="Which detector was used on this image query?")
    result_type: ResultTypeEnum = Field(..., description="What type of result are we returning?")
    result: ClassificationResult


class PaginatedDetectorList(BaseModel):
    """One page of Detector results, with links to adjacent pages (generated from the OpenAPI spec)."""

    count: Optional[int] = Field(None, example=123)
    next: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=4")
    previous: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=2")
    results: Optional[List[Detector]] = None


class PaginatedImageQueryList(BaseModel):
    """One page of ImageQuery results, with links to adjacent pages (generated from the OpenAPI spec)."""

    count: Optional[int] = Field(None, example=123)
    next: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=4")
    previous: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=2")
    results: Optional[List[ImageQuery]] = None
Loading