We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
2 parents 91c9856 + 208a654 — commit 64e84e1 (Copy full SHA for 64e84e1)
7 files changed
.github/workflows/docs.yml
@@ -10,7 +10,7 @@ on:
10
11
jobs:
12
tests:
13
- name: "Tests"
+ name: "Docs"
14
runs-on: ubuntu-latest
15
steps:
16
- uses: actions/checkout@v2
@@ -20,20 +20,34 @@ jobs:
20
- name: Install dependencies
21
run: |
22
python -m pip install --upgrade pip
23
- python -m pip install -e .[dev,torch]
+ python -m pip install -e .[dev]
24
- name: build docs
25
26
- cd docs
27
- make html
+ make docs
28
- name: save docs
29
uses: actions/upload-artifact@v3
30
with:
31
name: docs-html
32
path: docs/_build/html/
33
- - name: publish docs
+ - name: publish dev docs
34
if: github.ref_name == 'main' && github.ref_type == 'branch'
35
uses: peaceiris/actions-gh-pages@v3
36
37
github_token: ${{ secrets.GITHUB_TOKEN }}
38
publish_dir: docs/_build/*
39
publish_branch: gh-pages
+ destination_dir: main
40
+ - name: publish stable docs
41
+ if: github.ref_type == 'tag' && startswith(github.ref, 'refs/tags/v')
42
+ uses: peaceiris/actions-gh-pages@v3
43
+ with:
44
+ github_token: ${{ secrets.GITHUB_TOKEN }}
45
+ publish_dir: docs/_build/*
46
+ publish_branch: gh-pages
47
+ destination_dir: ${{ github.ref_name }}
48
+ - name: create symlink stable to new version
49
50
+ run: |
51
+ rm stable
52
+ ln -s "${{ github.ref_name }}" stable
53
+ ls -la
.github/workflows/weekly.yml
workflow_dispatch:
schedule:
# At 00:00 on Monday
- - cron: 0 0 * * MON
+ - cron: "0 0 * * MON"
vetiver_main_pins_main:
docs/source/advancedusage/custom_handler.md
@@ -8,7 +8,7 @@ This example shows a custom handler of `newmodeltype` type.
8
from vetiver.handlers.base import BaseHandler
9
class CustomHandler(BaseHandler):
- def __init__(model, ptype_data):
+ def __init__(self, model, ptype_data):
super().__init__(model, ptype_data)
model_type = staticmethod(lambda: newmodeltype)
docs/source/index.rst
@@ -10,15 +10,14 @@ You can install the released version of vetiver from `PyPI <https://pypi.org/pro
.. code-block:: bash
- pip install vetiver
+ python -m pip install vetiver
And the development version from `GitHub <https://github.com/rstudio/vetiver-python>`_ with:
17
18
19
python -m pip install git+https://github.com/rstudio/vetiver-python
-
This website documents the public API of Vetiver (for Python). See `vetiver.rstudio.com <https://vetiver.rstudio.com>`_ for
more on how to get started.
@@ -36,7 +35,8 @@ Version
:caption: Version
~VetiverModel
- ~pin_read_write.vetiver_pin_write
+ ~vetiver_pin_write
+ ~vetiver_create_ptype
Deploy
==================
@@ -52,6 +52,7 @@ Deploy
~predict
~write_app
54
~write_docker
55
+ ~deploy_rsconnect
56
57
Monitor
58
setup.cfg
@@ -1,6 +1,5 @@
1
[metadata]
2
name = vetiver
3
-version = 0.1.5
4
description = Deploy models into REST endpoints
5
long_description = file: README.md
6
long_description_content_type = text/markdown
@@ -44,7 +43,7 @@ dev =
pytest-snapshot
sphinx
sphinx-autodoc-typehints
- sphinx-book-theme
+ sphinx-book-theme==0.3.3
myst-parser
torch =
vetiver/__init__.py
@@ -10,7 +10,7 @@
from .attach_pkgs import * # noqa
from .meta import * # noqa
from .write_docker import write_docker # noqa
-from .write_fastapi import write_app # noqa
+from .write_fastapi import write_app, vetiver_write_app # noqa
from .handlers.base import BaseHandler, create_handler, InvalidModelError # noqa
from .handlers.sklearn import SKLearnHandler # noqa
from .handlers.torch import TorchHandler # noqa
vetiver/monitor.py
@@ -32,13 +32,17 @@ def compute_metrics(
Example
-------
- from sklearn import metrics
- rng = pd.date_range("1/1/2012", periods=10, freq="S")
- new = dict(x=range(len(rng)), y = range(len(rng)))
- df = pd.DataFrame(new, index = rng).reset_index(inplace=True)
- td = timedelta(seconds = 2)
- metric_set = [sklearn.metrics.mean_squared_error, sklearn.metrics.mean_absolute_error]
- compute_metrics(df, "index", td, metric_set=metric_set, truth="x", estimate="y")
+ >>> from sklearn.metrics import mean_squared_error, mean_absolute_error
+ >>> df = pd.DataFrame(
+ ... {
+ ... "index": ["2021-01-01", "2021-01-02", "2021-01-03"],
+ ... "truth": [200, 201, 199],
+ ... "pred": [198, 200, 199],
+ ... }
+ ... )
+ >>> td = timedelta(days = 1)
+ >>> metric_set = [mean_squared_error, mean_absolute_error]
+ >>> metrics = compute_metrics(df, "index", td, metric_set, "truth", "pred")
"""
0 commit comments