diff --git a/cvat/apps/engine/filters.py b/cvat/apps/engine/filters.py index dc3fcc4b..89434b3a 100644 --- a/cvat/apps/engine/filters.py +++ b/cvat/apps/engine/filters.py @@ -45,7 +45,7 @@ class SearchFilter(filters.SearchFilter): search_fields = getattr(view, 'search_fields', []) full_description = self.search_description + \ - f' Avaliable search_fields: {search_fields}' + f' Available search_fields: {search_fields}' return [ coreapi.Field( @@ -62,7 +62,7 @@ class SearchFilter(filters.SearchFilter): def get_schema_operation_parameters(self, view): search_fields = getattr(view, 'search_fields', []) full_description = self.search_description + \ - f' Avaliable search_fields: {search_fields}' + f' Available search_fields: {search_fields}' return [{ 'name': self.search_param, @@ -100,7 +100,7 @@ class OrderingFilter(filters.OrderingFilter): ordering_fields = getattr(view, 'ordering_fields', []) full_description = self.ordering_description + \ - f' Avaliable ordering_fields: {ordering_fields}' + f' Available ordering_fields: {ordering_fields}' return [ coreapi.Field( @@ -117,7 +117,7 @@ class OrderingFilter(filters.OrderingFilter): def get_schema_operation_parameters(self, view): ordering_fields = getattr(view, 'ordering_fields', []) full_description = self.ordering_description + \ - f' Avaliable ordering_fields: {ordering_fields}' + f' Available ordering_fields: {ordering_fields}' return [{ 'name': self.ordering_param, @@ -206,7 +206,7 @@ class JsonLogicFilter(filters.BaseFilterBackend): filter_fields = getattr(view, 'filter_fields', []) full_description = self.filter_description + \ - f' Avaliable filter_fields: {filter_fields}' + f' Available filter_fields: {filter_fields}' return [ coreapi.Field( @@ -223,7 +223,7 @@ class JsonLogicFilter(filters.BaseFilterBackend): def get_schema_operation_parameters(self, view): filter_fields = getattr(view, 'filter_fields', []) full_description = self.filter_description + \ - f' Avaliable filter_fields: {filter_fields}' + f' 
Available filter_fields: {filter_fields}' return [ { 'name': self.filter_param, diff --git a/site/content/en/docs/administration/advanced/k8s_deployment_with_helm.md b/site/content/en/docs/administration/advanced/k8s_deployment_with_helm.md index 34558c76..c9d90943 100644 --- a/site/content/en/docs/administration/advanced/k8s_deployment_with_helm.md +++ b/site/content/en/docs/administration/advanced/k8s_deployment_with_helm.md @@ -115,8 +115,8 @@ Before starting, ensure that the following prerequisites are met: minikube addons enable registry minikube addons enable registry-aliases ``` - Before Docker container images can be pushed to your newly created unsecure registry, - you need to add its address (`$(minikube ip):5000`) to the list of unsecure registries to + Before Docker container images can be pushed to your newly created insecure registry, + you need to add its address (`$(minikube ip):5000`) to the list of insecure registries to instruct Docker to accept working against it: follow the instructions in the [Docker documentation](https://docs.docker.com/registry/insecure/#deploy-a-plain-http-registry) @@ -127,7 +127,7 @@ Before starting, ensure that the following prerequisites are met: ```shell nuctl --namespace create project cvat ``` -1. Finaly deploy the fuction, i.e.: +1. Finally deploy the function, i.e.: - using minikube registry: ```shell nuctl deploy --project-name cvat --path serverless/tensorflow/faster_rcnn_inception_v2_coco/nuclio --registry $(minikube ip):5000 --run-registry registry.minikube @@ -311,7 +311,7 @@ Then reference it in helm update/install command using `-f` flag ### Why you used external charts to provide redis and postgres? 
Because they definitely know what they do better then we are, so we are getting more quality and less support ### How to use custom domain name with k8s deployment: -The default value `cvat.local` may be overriden with `--set ingress.hosts[0].host` option like this: +The default value `cvat.local` may be overridden with `--set ingress.hosts[0].host` option like this: ```shell helm upgrade -n default cvat -i --create-namespace helm-chart -f helm-chart/values.yaml -f helm-chart/values.override.yaml --set ingress.hosts[0].host=YOUR_FQDN ``` diff --git a/site/content/en/docs/administration/advanced/upgrade_guide.md b/site/content/en/docs/administration/advanced/upgrade_guide.md index 33e7039d..a192e1e3 100644 --- a/site/content/en/docs/administration/advanced/upgrade_guide.md +++ b/site/content/en/docs/administration/advanced/upgrade_guide.md @@ -102,7 +102,7 @@ docker compose up -d 1. It is highly recommended backup all CVAT data before updating, follow the [backup guide](/docs/administration/advanced/backup_guide/) and backup CVAT database volume. -1. Run previosly used CVAT version as usual +1. Run previously used CVAT version as usual 1. Backup current database with `pg_dumpall` tool: ```shell diff --git a/site/content/en/docs/administration/basics/AWS-Deployment-Guide.md b/site/content/en/docs/administration/basics/AWS-Deployment-Guide.md index 1a815783..fd7dcf66 100644 --- a/site/content/en/docs/administration/basics/AWS-Deployment-Guide.md +++ b/site/content/en/docs/administration/basics/AWS-Deployment-Guide.md @@ -22,7 +22,7 @@ There are two ways of deploying the CVAT. [installation instructions](/docs/administration/basics/installation/). The additional step is to add a [security group and rule to allow incoming connections](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html). 
-For any of above, don't forget to set the `CVAT_HOST` environemnt variable to the exposed +For any of above, don't forget to set the `CVAT_HOST` environment variable to the exposed AWS public IP address or hostname: ```bash diff --git a/site/content/en/docs/administration/basics/admin-account.md b/site/content/en/docs/administration/basics/admin-account.md index 84b4c384..890ebf92 100644 --- a/site/content/en/docs/administration/basics/admin-account.md +++ b/site/content/en/docs/administration/basics/admin-account.md @@ -5,7 +5,7 @@ weight: 2 description: 'A CVAT installation guide to create a superuser.' --- -This section is for users who whant to be a bit more flexible with CVAT use. +This section is for users who want to be a bit more flexible with CVAT use. The user you register by default does not have full permissions on the instance, so you must create a superuser. diff --git a/site/content/en/docs/administration/basics/social-accounts-configuration.md b/site/content/en/docs/administration/basics/social-accounts-configuration.md index e4ec6f89..5175ce02 100644 --- a/site/content/en/docs/administration/basics/social-accounts-configuration.md +++ b/site/content/en/docs/administration/basics/social-accounts-configuration.md @@ -46,11 +46,11 @@ To enable authentication, do the following: docker compose -f docker-compose.yml -f docker-compose.override.yml up -d ``` -## Enable authentication with a Github account +## Enable authentication with a GitHub account -There are 2 basic steps to enable Github account authentication. +There are 2 basic steps to enable GitHub account authentication. -1. Open Github settings page. +1. Open GitHub settings page. 2. On the left menu, click **<> Developer settings** > **OAuth Apps** > **Register new application**.
For more information, see [Creating an OAuth App](https://docs.github.com/en/developers/apps/building-oauth-apps/creating-an-oauth-app) 3. Fill in the name field, set the homepage URL (for example: `https://localhost:8080`), @@ -75,7 +75,7 @@ There are 2 basic steps to enable Github account authentication. docker compose -f docker-compose.yml -f docker-compose.override.yml up -d ``` -> **Note:** You can also configure [Github App](https://docs.github.com/en/developers/apps/building-github-apps/creating-a-github-app), +> **Note:** You can also configure [GitHub App](https://docs.github.com/en/developers/apps/building-github-apps/creating-a-github-app), > but don't forget to add required permissions. >
In the **Permission** > **Account permissions** > **Email addresses** must be set to **read-only**. diff --git a/site/content/en/docs/faq.md b/site/content/en/docs/faq.md index cca302d8..3529a884 100644 --- a/site/content/en/docs/faq.md +++ b/site/content/en/docs/faq.md @@ -49,7 +49,7 @@ You should free up disk space or change the threshold, to do so check: [Elastics ## How to change default CVAT hostname or port -To change the hostname, simply set the `CVAT_HOST` environemnt variable +To change the hostname, simply set the `CVAT_HOST` environment variable ```bash export CVAT_HOST= diff --git a/site/content/en/docs/manual/advanced/ai-tools.md b/site/content/en/docs/manual/advanced/ai-tools.md index 7f59540c..3f2aefc7 100644 --- a/site/content/en/docs/manual/advanced/ai-tools.md +++ b/site/content/en/docs/manual/advanced/ai-tools.md @@ -201,12 +201,12 @@ see [Automatic annotation](/docs/manual/advanced/automatic-annotation/). | Model | Description | | ------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| Mask RCNN | The model generates polygons for each instance of an object in the image.

For more information, see:
  • [Github: Mask RCNN](https://github.com/matterport/Mask_RCNN)
  • [Paper: Mask RCNN](https://arxiv.org/pdf/1703.06870.pdf) | -| Faster RCNN | The model generates bounding boxes for each instance of an object in the image.
    In this model, RPN and Fast R-CNN are combined into a single network.

    For more information, see:
  • [Github: Faster RCNN](https://github.com/ShaoqingRen/faster_rcnn)
  • [Paper: Faster RCNN](https://arxiv.org/pdf/1506.01497.pdf) | -| YOLO v3 | YOLO v3 is a family of object detection architectures and models pre-trained on the COCO dataset.

    For more information, see:
  • [Github: YOLO v3](https://github.com/ultralytics/yolov3)
  • [Site: YOLO v3](https://docs.ultralytics.com/#yolov3)
  • [Paper: YOLO v3](https://arxiv.org/pdf/1804.02767v1.pdf) | -| YOLO v5 | YOLO v5 is a family of object detection architectures and models based on the Pytorch framework.

    For more information, see:
  • [Github: YOLO v5](https://github.com/ultralytics/yolov5)
  • [Site: YOLO v5](https://docs.ultralytics.com/#yolov5) | +| Mask RCNN | The model generates polygons for each instance of an object in the image.

    For more information, see:
  • [GitHub: Mask RCNN](https://github.com/matterport/Mask_RCNN)
  • [Paper: Mask RCNN](https://arxiv.org/pdf/1703.06870.pdf) | +| Faster RCNN | The model generates bounding boxes for each instance of an object in the image.
    In this model, RPN and Fast R-CNN are combined into a single network.

    For more information, see:
  • [GitHub: Faster RCNN](https://github.com/ShaoqingRen/faster_rcnn)
  • [Paper: Faster RCNN](https://arxiv.org/pdf/1506.01497.pdf) | +| YOLO v3 | YOLO v3 is a family of object detection architectures and models pre-trained on the COCO dataset.

    For more information, see:
  • [GitHub: YOLO v3](https://github.com/ultralytics/yolov3)
  • [Site: YOLO v3](https://docs.ultralytics.com/#yolov3)
  • [Paper: YOLO v3](https://arxiv.org/pdf/1804.02767v1.pdf) | +| YOLO v5 | YOLO v5 is a family of object detection architectures and models based on the PyTorch framework.<br>

    For more information, see:
  • [GitHub: YOLO v5](https://github.com/ultralytics/yolov5)
  • [Site: YOLO v5](https://docs.ultralytics.com/#yolov5) | | Semantic segmentation for ADAS | This is a segmentation network to classify each pixel into 20 classes.

    For more information, see:
  • [Site: ADAS](https://docs.openvino.ai/2019_R1/_semantic_segmentation_adas_0001_description_semantic_segmentation_adas_0001.html) | -| Mask RCNN with Tensorflow | Mask RCNN version with Tensorflow. The model generates polygons for each instance of an object in the image.

    For more information, see:
  • [Github: Mask RCNN](https://github.com/matterport/Mask_RCNN)
  • [Paper: Mask RCNN](https://arxiv.org/pdf/1703.06870.pdf) | +| Mask RCNN with Tensorflow | Mask RCNN version with Tensorflow. The model generates polygons for each instance of an object in the image.

    For more information, see:
  • [GitHub: Mask RCNN](https://github.com/matterport/Mask_RCNN)
  • [Paper: Mask RCNN](https://arxiv.org/pdf/1703.06870.pdf) | | Faster RCNN with Tensorflow | Faster RCNN version with Tensorflow. The model generates bounding boxes for each instance of an object in the image.
    In this model, RPN and Fast R-CNN are combined into a single network.

    For more information, see:
  • [Site: Faster RCNN with Tensorflow](https://docs.openvino.ai/2021.4/omz_models_model_faster_rcnn_inception_v2_coco.html)
  • [Paper: Faster RCNN](https://arxiv.org/pdf/1506.01497.pdf) | | RetinaNet | Pytorch implementation of RetinaNet object detection.


    For more information, see:
  • [Specification: RetinaNet](https://paperswithcode.com/lib/detectron2/retinanet)
  • [Paper: RetinaNet](https://arxiv.org/pdf/1708.02002.pdf)
  • [Documentation: RetinaNet](https://detectron2.readthedocs.io/en/latest/tutorials/training.html) | | Face Detection | Face detector based on MobileNetV2 as a backbone for indoor and outdoor scenes shot by a front-facing camera.


    For more information, see:
  • [Site: Face Detection 0205](https://docs.openvino.ai/latest/omz_models_model_face_detection_0205.html) | @@ -276,8 +276,8 @@ All annotated objects will be automatically tracked when you move to the next fr | Model | Tool | Description | Example | | ----------------------------- | -------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------- | | TrackerMIL | OpenCV | TrackerMIL model is not bound to
    labels and can be used for any
    object. It is a fast client-side model
    designed to track simple non-overlapping objects.

    For more information, see:
  • [Article: Object Tracking using OpenCV](https://learnopencv.com/tag/mil/) | ![Annotation using a tracker](/images/tracker_mil_detrac.gif) | -| SiamMask | AI Tools | Fast online Object Tracking and Segmentation. The trackable object will
    be tracked automatically if the previous frame
    was the latest keyframe for the object.

    For more information, see:
  • [Github: SiamMask](https://github.com/foolwood/SiamMask)
  • [Paper: SiamMask](https://arxiv.org/pdf/1812.05050.pdf) | ![Annotation using a tracker](/images/tracker_ai_tools.gif) | -| Transformer Tracking (TransT) | AI Tools | Simple and efficient online tool for object tracking and segmentation.
    If the previous frame was the latest keyframe
    for the object, the trackable object will be tracked automatically.
    This is a modified version of the PyTracking
    Python framework based on Pytorch


    For more information, see:
  • [Github: TransT](https://github.com/chenxin-dlut/TransT)
  • [Paper: TransT](https://arxiv.org/pdf/2103.15436.pdf) | ![Annotation using a tracker](/images/tracker_transit.gif) | +| SiamMask | AI Tools | Fast online Object Tracking and Segmentation. The trackable object will
    be tracked automatically if the previous frame
    was the latest keyframe for the object.

    For more information, see:
  • [GitHub: SiamMask](https://github.com/foolwood/SiamMask)
  • [Paper: SiamMask](https://arxiv.org/pdf/1812.05050.pdf) | ![Annotation using a tracker](/images/tracker_ai_tools.gif) | +| Transformer Tracking (TransT) | AI Tools | Simple and efficient online tool for object tracking and segmentation.
    If the previous frame was the latest keyframe
    for the object, the trackable object will be tracked automatically.
    This is a modified version of the PyTracking
    Python framework based on PyTorch<br>


    For more information, see:
  • [GitHub: TransT](https://github.com/chenxin-dlut/TransT)
  • [Paper: TransT](https://arxiv.org/pdf/2103.15436.pdf) | ![Annotation using a tracker](/images/tracker_transit.gif) | diff --git a/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md b/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md index efffdd92..5f8527a8 100644 --- a/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md +++ b/site/content/en/docs/manual/advanced/annotation-with-brush-tool.md @@ -36,7 +36,7 @@ It has the following elements: |![Brush](/images/brushing_tools_icon.png)| **Brush** adds new mask/ new regions to the previously added mask).| |![Eraser](/images/brushing_tools_erase.png)|**Eraser** removes part of the mask.| |![Add poly](/images/brushing_tools_add_poly.png)|**Polygon** selection tool. Selection will become a mask.| -|![Remove poly](/images/brushing_tools_remove_poly.png)|**Remove polygon selection** substracts part of the polygon selection.| +|![Remove poly](/images/brushing_tools_remove_poly.png)|**Remove polygon selection** subtracts part of the polygon selection.| |![Brush size](/images/brushing_tools_brush_size.png)|**Brush size** in pixels.
    **Note:** Visible only when **Brush** or **Eraser** are selected.| |![Brush shape](/images/brushing_tools_brush_shape.png)|**Brush shape** with two options: circle and square.
    **Note:** Visible only when **Brush** or **Eraser** are selected.| |![Pixel remove](/images/brushing_tools_pixels.png)|**Remove underlying pixels**. When you are drawing or editing a mask with this tool,
    pixels on other masks that are located at the same positions as the pixels of the
    current mask are deleted.| diff --git a/site/content/en/docs/manual/advanced/annotation-with-skeletons/creating-the-skeleton.md b/site/content/en/docs/manual/advanced/annotation-with-skeletons/creating-the-skeleton.md index 969be13c..29913395 100644 --- a/site/content/en/docs/manual/advanced/annotation-with-skeletons/creating-the-skeleton.md +++ b/site/content/en/docs/manual/advanced/annotation-with-skeletons/creating-the-skeleton.md @@ -25,7 +25,7 @@ It is a mode where you can add new skeleton points clicking the drawing area. - DRAW AN EDGE BETWEEN TWO POINTS - in this mode you can add an edge, clicking any two points, which are not joined yet. - REMOVE A DRAWN SKELETON POINTS - in this mode clicking a point will remove the point and all attached edges. -You can also remove an edge only, it will be highligted as red on hover. +You can also remove an edge only, it will be highlighted as red on hover. - DOWNLOAD DRAWN TEMPLATE AS AN .SVG - you can download setup configuration to use it in future - UPLOAD A TEMPLATE FROM AN .SVG FILE - you can upload previously downloaded configuration diff --git a/site/content/en/docs/manual/advanced/context-images.md b/site/content/en/docs/manual/advanced/context-images.md index ecb689e1..e64b25fa 100644 --- a/site/content/en/docs/manual/advanced/context-images.md +++ b/site/content/en/docs/manual/advanced/context-images.md @@ -85,7 +85,7 @@ When you add context images to the set, small toolbar will appear on the top of | ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ![contex_images_4](/images/context_img_04.jpg) | **Fit views**. Click to restore the layout to its original appearance.

    If you've expanded any images in the layout, they will returned to their original size.

    This won't affect the number of context images on the screen. | | ![contex_images_5](/images/context_img_05.jpg) | **Add new image**. Click to add context image to the layout. | -| ![contex_images_6](/images/context_img_06.jpg) | **Reload layout**. Click to reload layout to the default view.

    Note, that this action can change the number of context images reseting them back to three. | +| ![contex_images_6](/images/context_img_06.jpg) | **Reload layout**. Click to reload layout to the default view.

    Note, that this action can change the number of context images resetting them back to three. | diff --git a/site/content/en/docs/manual/advanced/serverless-tutorial.md b/site/content/en/docs/manual/advanced/serverless-tutorial.md index 476a61fc..3ce74e49 100644 --- a/site/content/en/docs/manual/advanced/serverless-tutorial.md +++ b/site/content/en/docs/manual/advanced/serverless-tutorial.md @@ -501,7 +501,7 @@ into memory using `init_context(context)` function. Read more about the function in [Best Practices and Common Pitfalls][nuclio-bkms-doc]. After that we need to accept incoming HTTP requests, run inference, -reply with detection results. For the process our entry point is resposible +reply with detection results. For the process our entry point is responsible which we specified in our function specification `handler(context, event)`. Again in accordance to function specification the entry point should be located inside `main.py`. diff --git a/site/content/en/docs/manual/advanced/shortcuts.md b/site/content/en/docs/manual/advanced/shortcuts.md index 68a38458..e11ed312 100644 --- a/site/content/en/docs/manual/advanced/shortcuts.md +++ b/site/content/en/docs/manual/advanced/shortcuts.md @@ -68,10 +68,10 @@ Many UI elements have shortcut hints. 
Put your pointer to a required element to | `Shift+Tab` | Go to the previous annotated object in current frame | | `` | Assign a corresponding value to the current attribute | | | _Standard 3d mode_ | -| `Shift+arrrowup` | Increases camera roll angle | -| `Shift+arrrowdown` | Decreases camera roll angle | -| `Shift+arrrowleft` | Decreases camera pitch angle | -| `Shift+arrrowright` | Increases camera pitch angle | +| `Shift+Up Arrow` | Increases camera roll angle | +| `Shift+Down Arrow` | Decreases camera roll angle | +| `Shift+Left Arrow` | Decreases camera pitch angle | +| `Shift+Right Arrow` | Increases camera pitch angle | | `Alt+O` | Move the camera up | | `Alt+U` | Move the camera down | | `Alt+J` | Move the camera left | diff --git a/site/content/en/docs/manual/basics/3d-object-annotation-basics.md b/site/content/en/docs/manual/basics/3d-object-annotation-basics.md index 2e7fdf92..b47cd61b 100644 --- a/site/content/en/docs/manual/basics/3d-object-annotation-basics.md +++ b/site/content/en/docs/manual/basics/3d-object-annotation-basics.md @@ -22,9 +22,9 @@ To move in 3D space you can use several methods: ![](/images/image216_carla_town3.jpg) You can move around by pressing the corresponding buttons: -- To rotate the camera use: `Shift+arrrowup`/`Shift+arrrowdown`/`Shift+arrrowleft`/`Shift+arrrowright`. -- To move left/right use: `Allt+J`/`Alt+L`. -- To move up/down use: `Alt-U`/`Alt+O`. +- To rotate the camera use: `Shift+Up Arrow`/`Shift+Down Arrow`/`Shift+Left Arrow`/`Shift+Right Arrow`. +- To move left/right use: `Alt+J`/`Alt+L`. +- To move up/down use: `Alt+U`/`Alt+O`. - To zoom in/out use: `Alt+K`/`Alt+I`. 
### Creating a cuboid diff --git a/site/content/en/docs/manual/basics/create_an_annotation_task.md b/site/content/en/docs/manual/basics/create_an_annotation_task.md index 87f6be27..f8457067 100644 --- a/site/content/en/docs/manual/basics/create_an_annotation_task.md +++ b/site/content/en/docs/manual/basics/create_an_annotation_task.md @@ -73,7 +73,7 @@ label to any particular shape tool. For example, you added: - Label `sun` with the **Label shape** type `ellipse` -- Lable `car` with the **Label shape** type `any` +- Label `car` with the **Label shape** type `any` As a result: @@ -138,7 +138,7 @@ To add an attribute, do the following: 2. In the **Name** field enter the name of the attribute. 3. From the drop-down, select way to display the attribute in the **Objects menu**: - - `Select` enables a drop-down list, from which you can select an attibute.
    If in + - `Select` enables a drop-down list, from which you can select an attribute.
    If in the **Attribute value** field you add `__undefined__`, the drop-down list will have a blank value.
    This is useful for cases where the attribute of the object cannot be clarified: diff --git a/site/content/en/docs/manual/basics/registration.md b/site/content/en/docs/manual/basics/registration.md index 32b430d5..97483b82 100644 --- a/site/content/en/docs/manual/basics/registration.md +++ b/site/content/en/docs/manual/basics/registration.md @@ -8,7 +8,7 @@ description: 'App CVAT user registration and account access.' To start to annotate in CVAT, you need to create an account or log in to the existing account. -This section describes [App CVAT](https://app.cvat.ai/), that is sutable +This section describes [App CVAT](https://app.cvat.ai/), that is suitable for small personal projects, that do not require user management. It is also ok to use if you just want to try what is CVAT. @@ -46,7 +46,7 @@ To register as a non-admin user, do the following: ![Usernname generation](/images/filling_email.gif) -To register with Google or Github, click the button with the name of the service, and folow instructions on the screen. +To register with Google or GitHub, click the button with the name of the service, and follow instructions on the screen. ### Account access @@ -56,4 +56,4 @@ To access your account, do the following: 2. Enter username or email. The password field will appear. 3. Enter the password and click **Next**. -To log in with Google or Github, click the button with the name of the service. +To log in with Google or GitHub, click the button with the name of the service. diff --git a/site/content/en/docs/manual/basics/settings.md b/site/content/en/docs/manual/basics/settings.md index 166f562e..c1b1994e 100644 --- a/site/content/en/docs/manual/basics/settings.md +++ b/site/content/en/docs/manual/basics/settings.md @@ -61,7 +61,7 @@ In tab `Workspace` you can: for shape in the attribute annotation mode. - `Control points size` — defines a size of any interactable points in the tool -(polygon's vertexes, rectangle dragging points, etc.) 
+(polygon's vertices, rectangle dragging points, etc.) - `Default number of points in polygon approximation` With this setting, you can choose the default number of points in polygon.