diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 000000000..a199226df
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,25 @@
+---
+name: Bug report
+about: Create a bug report or request for help
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**Versions**
+Details of your environment, including:
+ - Tableau Server version (or note if using Tableau Online)
+ - Python version
+ - TSC library version
+
+**To Reproduce**
+Steps to reproduce the behavior. Please include a code snippet where possible.
+
+**Results**
+What are the results or error messages received?
+
+**NOTE:** Be careful not to post usernames, passwords, auth tokens, or any other private or sensitive information.
diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
index 45b9548c1..61476132f 100644
--- a/.github/workflows/run-tests.yml
+++ b/.github/workflows/run-tests.yml
@@ -1,20 +1,21 @@
-name: Python package
+name: Python tests
on: [push]
jobs:
build:
-
- runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
- python-version: [3.5, 3.6, 3.7, 3.8, 3.9]
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ python-version: [3.6, 3.7, 3.8, 3.9, 3.10.0-rc.2]
+
+ runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- - name: Set up Python ${{ matrix.python-version }}
+ - name: Set up Python ${{ matrix.python-version }} on ${{ matrix.os }}
uses: actions/setup-python@v2
with:
python-version: ${{ matrix.python-version }}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c4c9197f5..e375f8385 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,11 @@
+## 0.17.0 (20 October 2021)
+* Updated publish.sh to use python3 (#866)
+* Fixed the jobs.get_by_id(job_id) example and reference docs (#867, #868)
+* Fixed handling of workbooks in personal spaces, which do not have a project ID or name (#875)
+* Updated links to the Data Source Methods page in the REST API docs (#879)
+* Upgraded to a newer Slack action provider (#880)
+* Added support for getting flow run status and for cancelling flow runs (#884); see the usage sketch below
+
## 0.16.0 (15 July 2021)
* Documentation updates (#800, #818, #839, #842)
* Fixed data alert repr in subscription item (#821)
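The flow run entry above (#884) arrives without an accompanying sample in this diff, so here is a rough sketch of how the new capability might be exercised. This is illustrative only: the endpoint and attribute names (`server.flow_runs.get`, `server.flow_runs.cancel`, `run.status`), the return shape of `get()`, and all placeholder credentials are assumptions inferred from the changelog entry, not confirmed by this patch.

```python
import tableauserverclient as TSC

# Placeholders: substitute your own server URL, token name/value, and site name.
tableau_auth = TSC.PersonalAccessTokenAuth('TOKEN_NAME', 'TOKEN_VALUE', site_id='SITENAME')
server = TSC.Server('https://SERVER_URL', use_server_version=True)

with server.auth.sign_in(tableau_auth):
    # Assumed: get() returns a list of flow run items; depending on the TSC
    # version it may instead return a (items, pagination_item) tuple.
    flow_runs = server.flow_runs.get()
    for run in flow_runs:
        print(run.id, run.status)

    # Assumed: cancel() takes the id of a run that is still in progress.
    if flow_runs:
        server.flow_runs.cancel(flow_runs[0].id)
```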
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 74b20d93d..89b8d213c 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -59,3 +59,4 @@ The following people have contributed to this project to make it possible, and w
* [Dan Zucker](https://github.com/dzucker-tab)
* [Brian Cantoni](https://github.com/bcantoni)
* [Ovini Nanayakkara](https://github.com/ovinis)
+* [Manish Muttreja](https://github.com/mmuttreja-tableau)
diff --git a/README.md b/README.md
index 1aed88d61..b454dd4c7 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,8 @@ Use the Tableau Server Client (TSC) library to increase your productivity as you
* Create users and groups.
* Query projects, sites, and more.
-This repository contains Python source code and sample files. Python versions 3.5 and up are supported.
+This repository contains Python source code and sample files. Python versions 3.6 and up are supported.
For more information on installing and using TSC, see the documentation:
+
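As a companion to the capabilities listed in the README excerpt above (sign in, query projects and sites), here is a minimal sign-in-and-query sketch. The server URL, token name, token value, and site name are placeholders; the personal-access-token pattern mirrors the one used by the updated samples later in this diff.

```python
import tableauserverclient as TSC

# Placeholders: substitute your own server URL, token name/value, and site name.
tableau_auth = TSC.PersonalAccessTokenAuth('TOKEN_NAME', 'TOKEN_VALUE', site_id='SITENAME')
server = TSC.Server('https://SERVER_URL', use_server_version=True)

with server.auth.sign_in(tableau_auth):
    # Query the projects on the site and print their names
    all_projects, pagination_item = server.projects.get()
    print([project.name for project in all_projects])
```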
diff --git a/publish.sh b/publish.sh
index 99a3115ec..02812c1c3 100755
--- a/publish.sh
+++ b/publish.sh
@@ -3,7 +3,6 @@
set -e
rm -rf dist
-python setup.py sdist
-python setup.py bdist_wheel
+python3 setup.py sdist
python3 setup.py bdist_wheel
twine upload dist/*
diff --git a/samples/add_default_permission.py b/samples/add_default_permission.py
index 63c38f53d..8018c7b30 100644
--- a/samples/add_default_permission.py
+++ b/samples/add_default_permission.py
@@ -1,6 +1,6 @@
####
# This script demonstrates how to add default permissions using TSC
-# To run the script, you must have installed Python 3.5 and later.
+# To run the script, you must have installed Python 3.6 or later.
#
# In order to demonstrate adding a new default permission, this sample will create
# a new project and add a new capability to the new project, for the default "All users" group.
@@ -10,7 +10,6 @@
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -18,27 +17,26 @@
def main():
parser = argparse.ArgumentParser(description='Add workbook default permissions for a given project.')
- parser.add_argument('--server', '-s', required=True, help='Server address')
- parser.add_argument('--username', '-u', required=True, help='Username to sign into server')
- parser.add_argument('--site', '-S', default=None, help='Site to sign into - default site if not provided')
- parser.add_argument('-p', default=None, help='Password to sign into server')
-
+ # Common options; please keep those in sync across all samples
+ parser.add_argument('--server', '-s', required=True, help='server address')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- # Sign in
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
+ # Sign in to server
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
diff --git a/samples/create_group.py b/samples/create_group.py
index 7f9dc1e96..ad0e6cc4f 100644
--- a/samples/create_group.py
+++ b/samples/create_group.py
@@ -2,12 +2,11 @@
# This script demonstrates how to create a group using the Tableau
# Server Client.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
from datetime import time
@@ -18,20 +17,26 @@
def main():
parser = argparse.ArgumentParser(description='Creates a sample user group.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
- args = parser.parse_args()
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
- password = getpass.getpass("Password: ")
+ args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
group = TSC.GroupItem('test')
group = server.groups.create(group)
diff --git a/samples/create_project.py b/samples/create_project.py
index 0380cb8a0..814d35617 100644
--- a/samples/create_project.py
+++ b/samples/create_project.py
@@ -4,11 +4,10 @@
# parent_id.
#
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import sys
@@ -27,28 +26,26 @@ def create_project(server, project_item):
def main():
parser = argparse.ArgumentParser(description='Create new projects.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('-p', default=None, help='password')
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Use highest Server REST API version available
server.use_server_version()
diff --git a/samples/create_schedules.py b/samples/create_schedules.py
index c1bcb712f..39332713b 100644
--- a/samples/create_schedules.py
+++ b/samples/create_schedules.py
@@ -2,12 +2,11 @@
# This script demonstrates how to create schedules using the Tableau
# Server Client.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
from datetime import time
@@ -18,20 +17,26 @@
def main():
parser = argparse.ArgumentParser(description='Creates sample schedules for each type of frequency.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
- args = parser.parse_args()
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
- password = getpass.getpass("Password: ")
+ args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Hourly Schedule
# This schedule will run every 2 hours between 2:30AM and 11:00PM
diff --git a/samples/download_view_image.py b/samples/download_view_image.py
index 07162eebf..3ac2ed4d5 100644
--- a/samples/download_view_image.py
+++ b/samples/download_view_image.py
@@ -5,11 +5,10 @@
# For more information, refer to the documentations on 'Query View Image'
# (https://onlinehelp.tableau.com/current/api/rest_api/en-us/help.htm)
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -18,34 +17,30 @@
def main():
parser = argparse.ArgumentParser(description='Download image of a specified view.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--site-id', '-si', required=False,
- help='content url for site the view is on')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--view-name', '-v', required=True,
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
+ parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
+ help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--view-name', '-vn', required=True,
help='name of view to download an image of')
parser.add_argument('--filepath', '-f', required=True, help='filepath to save the image returned')
parser.add_argument('--maxage', '-m', required=False, help='max age of the image in the cache in minutes.')
- parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
- help='desired logging level (set to error by default)')
args = parser.parse_args()
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Sign in to server.
- site_id = args.site_id
- if not site_id:
- site_id = ""
- tableau_auth = TSC.TableauAuth(args.username, password, site_id=site_id)
- server = TSC.Server(args.server)
- # The new endpoint was introduced in Version 2.5
- server.version = "2.5"
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Step 2: Query for the view that we want an image of
req_option = TSC.RequestOptions()
diff --git a/samples/explore_datasource.py b/samples/explore_datasource.py
index e740d60f1..a78345122 100644
--- a/samples/explore_datasource.py
+++ b/samples/explore_datasource.py
@@ -10,7 +10,6 @@
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -19,25 +18,28 @@
def main():
parser = argparse.ArgumentParser(description='Explore datasource functions supported by the Server API.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--publish', '-p', metavar='FILEPATH', help='path to datasource to publish')
- parser.add_argument('--download', '-d', metavar='FILEPATH', help='path to save downloaded datasource')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--publish', metavar='FILEPATH', help='path to datasource to publish')
+ parser.add_argument('--download', metavar='FILEPATH', help='path to save downloaded datasource')
args = parser.parse_args()
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
- server.use_highest_version()
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Query projects for use when demonstrating publishing and updating
all_projects, pagination_item = server.projects.get()
diff --git a/samples/explore_webhooks.py b/samples/explore_webhooks.py
index ab94f7195..50c677cba 100644
--- a/samples/explore_webhooks.py
+++ b/samples/explore_webhooks.py
@@ -10,7 +10,6 @@
####
import argparse
-import getpass
import logging
import os.path
@@ -20,35 +19,28 @@
def main():
parser = argparse.ArgumentParser(description='Explore webhook functions supported by the Server API.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('-p', default=None, help='password')
- parser.add_argument('--create', '-c', help='create a webhook')
- parser.add_argument('--delete', '-d', help='delete a webhook', action='store_true')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--create', help='create a webhook')
+ parser.add_argument('--delete', help='delete a webhook', action='store_true')
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
- print("Signing in to " + args.server + " [" + args.site + "] as " + args.username)
- server = TSC.Server(args.server)
-
- # Set http options to disable verifying SSL
- server.add_http_options({'verify': False})
-
- server.use_server_version()
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Create webhook if create flag is set (-create, -c)
diff --git a/samples/explore_workbook.py b/samples/explore_workbook.py
index 88eebc1a3..8746db80e 100644
--- a/samples/explore_workbook.py
+++ b/samples/explore_workbook.py
@@ -10,7 +10,6 @@
####
import argparse
-import getpass
import logging
import os.path
@@ -20,33 +19,34 @@
def main():
parser = argparse.ArgumentParser(description='Explore workbook functions supported by the Server API.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--publish', '-p', metavar='FILEPATH', help='path to workbook to publish')
- parser.add_argument('--download', '-d', metavar='FILEPATH', help='path to save downloaded workbook')
- parser.add_argument('--preview-image', '-i', metavar='FILENAME',
- help='filename (a .png file) to save the preview image')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--publish', metavar='FILEPATH', help='path to workbook to publish')
+ parser.add_argument('--download', metavar='FILEPATH', help='path to save downloaded workbook')
+ parser.add_argument('--preview-image', '-i', metavar='FILENAME',
+ help='filename (a .png file) to save the preview image')
args = parser.parse_args()
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
- server.use_highest_version()
-
- overwrite_true = TSC.Server.PublishMode.Overwrite
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Publish workbook if publish flag is set (-publish, -p)
+ overwrite_true = TSC.Server.PublishMode.Overwrite
if args.publish:
all_projects, pagination_item = server.projects.get()
default_project = next((project for project in all_projects if project.is_default()), None)
diff --git a/samples/export.py b/samples/export.py
index b8cd01140..6317ec53b 100644
--- a/samples/export.py
+++ b/samples/export.py
@@ -2,11 +2,10 @@
# This script demonstrates how to export a view using the Tableau
# Server Client.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -14,13 +13,16 @@
def main():
parser = argparse.ArgumentParser(description='Export a view as an image, PDF, or CSV')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('-p', default=None)
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--pdf', dest='type', action='store_const', const=('populate_pdf', 'PDFRequestOptions', 'pdf',
'pdf'))
@@ -36,16 +38,11 @@ def main():
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
views = filter(lambda x: x.id == args.resource_id,
diff --git a/samples/export_wb.py b/samples/export_wb.py
index 334d57c89..2be476130 100644
--- a/samples/export_wb.py
+++ b/samples/export_wb.py
@@ -4,12 +4,11 @@
#
# You will need to do `pip install PyPDF2` to use this sample.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tempfile
import shutil
@@ -52,23 +51,21 @@ def cleanup(tempdir):
def main():
parser = argparse.ArgumentParser(description='Export to PDF all of the views in a workbook.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--site', '-S', default=None, help='Site to log into, do not specify for default site')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--password', '-p', default=None, help='password for the user')
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
parser.add_argument('--file', '-f', default='out.pdf', help='filename to store the exported data')
parser.add_argument('resource_id', help='LUID for the workbook')
args = parser.parse_args()
- if args.password is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.password
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
@@ -76,9 +73,9 @@ def main():
tempdir = tempfile.mkdtemp('tsc')
logging.debug("Saving to tempdir: %s", tempdir)
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
- server = TSC.Server(args.server, use_server_version=True)
try:
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
get_list = functools.partial(get_views_for_workbook, server)
download = functools.partial(download_pdf, server, tempdir)
diff --git a/samples/filter_sort_groups.py b/samples/filter_sort_groups.py
index f8123a29c..24dee791d 100644
--- a/samples/filter_sort_groups.py
+++ b/samples/filter_sort_groups.py
@@ -2,12 +2,11 @@
# This script demonstrates how to filter and sort groups using the Tableau
# Server Client.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -25,30 +24,28 @@ def create_example_group(group_name='Example Group', server=None):
def main():
parser = argparse.ArgumentParser(description='Filter and sort groups.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
- parser.add_argument('-p', default=None)
- args = parser.parse_args()
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
+ args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
- # Determine and use the highest api version for the server
- server.use_server_version()
-
group_name = 'SALES NORTHWEST'
# Try to create a group named "SALES NORTHWEST"
create_example_group(group_name, server)
diff --git a/samples/filter_sort_projects.py b/samples/filter_sort_projects.py
index 0c62614b0..23b350fa6 100644
--- a/samples/filter_sort_projects.py
+++ b/samples/filter_sort_projects.py
@@ -2,11 +2,10 @@
# This script demonstrates how to use the Tableau Server Client
# to filter and sort on the name of the projects present on site.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -26,28 +25,26 @@ def create_example_project(name='Example Project', content_permissions='LockedTo
def main():
parser = argparse.ArgumentParser(description='Filter and sort projects.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('-p', default=None)
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Use highest Server REST API version available
server.use_server_version()
diff --git a/samples/initialize_server.py b/samples/initialize_server.py
index a3e312ce9..a7dd552e1 100644
--- a/samples/initialize_server.py
+++ b/samples/initialize_server.py
@@ -5,7 +5,6 @@
####
import argparse
-import getpass
import glob
import logging
import tableauserverclient as TSC
@@ -13,17 +12,21 @@
def main():
parser = argparse.ArgumentParser(description='Initialize a server with content.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--datasources-folder', '-df', required=True, help='folder containing datasources')
- parser.add_argument('--workbooks-folder', '-wf', required=True, help='folder containing workbooks')
- parser.add_argument('--site-id', '-sid', required=False, default='', help='site id of the site to use')
- parser.add_argument('--project', '-p', required=False, default='Default', help='project to use')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
- args = parser.parse_args()
+ # Options specific to this sample
+ parser.add_argument('--datasources-folder', '-df', required=True, help='folder containing datasources')
+ parser.add_argument('--workbooks-folder', '-wf', required=True, help='folder containing workbooks')
+ parser.add_argument('--project', required=False, default='Default', help='project to use')
- password = getpass.getpass("Password: ")
+ args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
@@ -32,9 +35,8 @@ def main():
################################################################################
# Step 1: Sign in to server.
################################################################################
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
################################################################################
diff --git a/samples/kill_all_jobs.py b/samples/kill_all_jobs.py
index 1aeb7298e..196da4b01 100644
--- a/samples/kill_all_jobs.py
+++ b/samples/kill_all_jobs.py
@@ -1,11 +1,10 @@
####
# This script demonstrates how to kill all of the running jobs
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -13,27 +12,25 @@
def main():
parser = argparse.ArgumentParser(description='Cancel all of the running background jobs.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--site', '-S', default=None, help='site to log into, do not specify for default site')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--password', '-p', default=None, help='password for the user')
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
- if args.password is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.password
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- # SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
req = TSC.RequestOptions()
diff --git a/samples/list.py b/samples/list.py
index 10e11ac04..867757668 100644
--- a/samples/list.py
+++ b/samples/list.py
@@ -1,11 +1,10 @@
####
# This script demonstrates how to list all of the workbooks or datasources
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import os
import sys
@@ -15,28 +14,26 @@
def main():
parser = argparse.ArgumentParser(description='List out the names and LUIDs for different resource types.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--site', '-S', default="", help='site to log into, do not specify for default site')
- parser.add_argument('--token-name', '-n', required=True, help='username to signin under')
- parser.add_argument('--token', '-t', help='personal access token for logging in')
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-n', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
-
+ # Options specific to this sample
parser.add_argument('resource_type', choices=['workbook', 'datasource', 'project', 'view', 'job', 'webhooks'])
args = parser.parse_args()
- token = os.environ.get('TOKEN', args.token)
- if not token:
- print("--token or TOKEN environment variable needs to be set")
- sys.exit(1)
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- # SIGN IN
- tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, token, site_id=args.site)
+ # Sign in to server
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
endpoint = {
diff --git a/samples/login.py b/samples/login.py
index 29e02e14e..c8af97505 100644
--- a/samples/login.py
+++ b/samples/login.py
@@ -1,7 +1,7 @@
####
# This script demonstrates how to log in to Tableau Server Client.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
@@ -13,16 +13,17 @@
def main():
parser = argparse.ArgumentParser(description='Logs in to the server.')
-
+    # This sample is special: it doesn't take `token-value`, and it offers both token-based and password-based authentication.
+    # Please still try to keep common options like `server` and `site` consistent across samples.
+ # Common options:
+ parser.add_argument('--server', '-s', required=True, help='server address')
+ parser.add_argument('--site', '-S', help='site name')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
-
- parser.add_argument('--server', '-s', required=True, help='server address')
-
+ # Options specific to this sample
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--username', '-u', help='username to sign into the server')
group.add_argument('--token-name', '-n', help='name of the personal access token used to sign into the server')
- parser.add_argument('--sitename', '-S', default='')
args = parser.parse_args()
@@ -37,8 +38,8 @@ def main():
# Trying to authenticate using username and password.
password = getpass.getpass("Password: ")
- print("\nSigning in...\nServer: {}\nSite: {}\nUsername: {}".format(args.server, args.sitename, args.username))
- tableau_auth = TSC.TableauAuth(args.username, password, site_id=args.sitename)
+ print("\nSigning in...\nServer: {}\nSite: {}\nUsername: {}".format(args.server, args.site, args.username))
+ tableau_auth = TSC.TableauAuth(args.username, password, site_id=args.site)
with server.auth.sign_in(tableau_auth):
print('Logged in successfully')
@@ -47,9 +48,9 @@ def main():
personal_access_token = getpass.getpass("Personal Access Token: ")
print("\nSigning in...\nServer: {}\nSite: {}\nToken name: {}"
- .format(args.server, args.sitename, args.token_name))
+ .format(args.server, args.site, args.token_name))
tableau_auth = TSC.PersonalAccessTokenAuth(token_name=args.token_name,
- personal_access_token=personal_access_token, site_id=args.sitename)
+ personal_access_token=personal_access_token, site_id=args.site)
with server.auth.sign_in_with_personal_access_token(tableau_auth):
print('Logged in successfully')
diff --git a/samples/metadata_query.py b/samples/metadata_query.py
new file mode 100644
index 000000000..c9cf7394c
--- /dev/null
+++ b/samples/metadata_query.py
@@ -0,0 +1,64 @@
+####
+# This script demonstrates how to use the metadata API to query information on a published data source
+#
+# To run the script, you must have installed Python 3.6 or later.
+####
+
+import argparse
+import logging
+from pprint import pprint
+
+import tableauserverclient as TSC
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Use the metadata API to get information on a published data source.')
+ # Common options; please keep those in sync across all samples
+ parser.add_argument('--server', '-s', required=True, help='server address')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-n', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
+ parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
+ help='desired logging level (set to error by default)')
+ # Options specific to this sample
+    parser.add_argument('datasource_name', nargs='?',
+                        help="The name of the published datasource. If not present, we query all data sources.")
+
+ args = parser.parse_args()
+
+ # Set logging level based on user input, or error by default
+ logging_level = getattr(logging, args.logging_level.upper())
+ logging.basicConfig(level=logging_level)
+
+ # Sign in to server
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
+ with server.auth.sign_in(tableau_auth):
+ # Execute the query
+ result = server.metadata.query("""
+            query publishedDatasourcesByName($name: String) {
+ publishedDatasources (filter: {name: $name}) {
+ luid
+ name
+ description
+ projectName
+ fields {
+ name
+ }
+ }
+ }""", {"name": args.datasource_name})
+
+ # Display warnings/errors (if any)
+ if result.get("errors"):
+ print("### Errors/Warnings:")
+ pprint(result["errors"])
+
+ # Print the results
+ if result.get("data"):
+ print("### Results:")
+ pprint(result["data"]["publishedDatasources"])
+
+if __name__ == '__main__':
+ main()
diff --git a/samples/move_workbook_projects.py b/samples/move_workbook_projects.py
index c31425f25..c8227aeda 100644
--- a/samples/move_workbook_projects.py
+++ b/samples/move_workbook_projects.py
@@ -4,11 +4,10 @@
# a workbook that matches a given name and update it to be in
# the desired project.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -17,25 +16,28 @@
def main():
parser = argparse.ArgumentParser(description='Move one workbook from the default project to another.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--workbook-name', '-w', required=True, help='name of workbook to move')
- parser.add_argument('--destination-project', '-d', required=True, help='name of project to move workbook into')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--workbook-name', '-w', required=True, help='name of workbook to move')
+ parser.add_argument('--destination-project', '-d', required=True, help='name of project to move workbook into')
args = parser.parse_args()
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Sign in to server
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Step 2: Query workbook to move
req_option = TSC.RequestOptions()
diff --git a/samples/move_workbook_sites.py b/samples/move_workbook_sites.py
index 08bde0ec6..e0475ac06 100644
--- a/samples/move_workbook_sites.py
+++ b/samples/move_workbook_sites.py
@@ -4,11 +4,10 @@
# a workbook that matches a given name, download the workbook,
# and then publish it to the destination site.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import shutil
import tempfile
@@ -21,23 +20,28 @@ def main():
parser = argparse.ArgumentParser(description="Move one workbook from the"
"default project of the default site to"
"the default project of another site.")
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--workbook-name', '-w', required=True, help='name of workbook to move')
- parser.add_argument('--destination-site', '-d', required=True, help='name of site to move workbook into')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--workbook-name', '-w', required=True, help='name of workbook to move')
+ parser.add_argument('--destination-site', '-d', required=True, help='name of site to move workbook into')
- args = parser.parse_args()
- password = getpass.getpass("Password: ")
+ args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Sign in to both sites on server
- tableau_auth = TSC.TableauAuth(args.username, password)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
source_server = TSC.Server(args.server)
dest_server = TSC.Server(args.server)
diff --git a/samples/pagination_sample.py b/samples/pagination_sample.py
index 6779023ba..2ebd011dc 100644
--- a/samples/pagination_sample.py
+++ b/samples/pagination_sample.py
@@ -10,7 +10,6 @@
####
import argparse
-import getpass
import logging
import os.path
@@ -20,26 +19,28 @@
def main():
parser = argparse.ArgumentParser(description='Demonstrate pagination on the list of workbooks on the server.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-n', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- # SIGN IN
-
- tableau_auth = TSC.TableauAuth(args.username, password)
- server = TSC.Server(args.server)
-
+ # Sign in to server
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
-
# Pager returns a generator that yields one item at a time fetching
# from Server only when necessary. Pager takes a server Endpoint as its
# first parameter. It will call 'get' on that endpoint. To get workbooks
diff --git a/samples/publish_datasource.py b/samples/publish_datasource.py
index fa0fe2a95..8ae744185 100644
--- a/samples/publish_datasource.py
+++ b/samples/publish_datasource.py
@@ -15,7 +15,7 @@
# more information on personal access tokens, refer to the documentations:
# (https://help.tableau.com/current/server/en-us/security_personal_access_tokens.htm)
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
@@ -26,15 +26,18 @@
def main():
parser = argparse.ArgumentParser(description='Publish a datasource to server.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--site', '-i', help='site name')
+ parser.add_argument('--site', '-S', help='site name')
parser.add_argument('--token-name', '-p', required=True,
help='name of the personal access token used to sign into the server')
parser.add_argument('--token-value', '-v', required=True,
help='value of the personal access token used to sign into the server')
- parser.add_argument('--filepath', '-f', required=True, help='filepath to the datasource to publish')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--file', '-f', required=True, help='filepath to the datasource to publish')
+ parser.add_argument('--project', help='Project within which to publish the datasource')
parser.add_argument('--async', '-a', help='Publishing asynchronously', dest='async_', action='store_true')
parser.add_argument('--conn-username', help='connection username')
parser.add_argument('--conn-password', help='connection password')
@@ -55,9 +58,22 @@ def main():
tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
- # Create a new datasource item to publish - empty project_id field
- # will default the publish to the site's default project
- new_datasource = TSC.DatasourceItem(project_id="")
+ # Empty project_id field will default the publish to the site's default project
+ project_id = ""
+
+ # Retrieve the project id, if a project name was passed
+ if args.project is not None:
+ req_options = TSC.RequestOptions()
+ req_options.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,
+ TSC.RequestOptions.Operator.Equals,
+ args.project))
+ projects = list(TSC.Pager(server.projects, req_options))
+            if len(projects) != 1:
+                raise ValueError("Project name must match exactly one project on the server")
+            project_id = projects[0].id
+
+ # Create a new datasource item to publish
+ new_datasource = TSC.DatasourceItem(project_id=project_id)
# Create a connection_credentials item if connection details are provided
new_conn_creds = None
diff --git a/samples/publish_workbook.py b/samples/publish_workbook.py
index ca366cf9e..fcfcddc15 100644
--- a/samples/publish_workbook.py
+++ b/samples/publish_workbook.py
@@ -11,11 +11,10 @@
# For more information, refer to the documentations on 'Publish Workbook'
# (https://onlinehelp.tableau.com/current/api/rest_api/en-us/help.htm)
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -25,29 +24,30 @@
def main():
parser = argparse.ArgumentParser(description='Publish a workbook to server.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--filepath', '-f', required=True, help='computer filepath of the workbook to publish')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('--file', '-f', required=True, help='local filepath of the workbook to publish')
parser.add_argument('--as-job', '-a', help='Publishing asynchronously', action='store_true')
parser.add_argument('--skip-connection-check', '-c', help='Skip live connection check', action='store_true')
- parser.add_argument('--site', '-S', default='', help='id (contentUrl) of site to sign into')
- args = parser.parse_args()
- password = getpass.getpass("Password: ")
+ args = parser.parse_args()
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Sign in to server.
- tableau_auth = TSC.TableauAuth(args.username, password, site_id=args.site)
- server = TSC.Server(args.server)
-
- overwrite_true = TSC.Server.PublishMode.Overwrite
-
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
# Step 2: Get all the projects on server, then look for the default one.
@@ -68,6 +68,7 @@ def main():
all_connections.append(connection2)
# Step 3: If default project is found, form a new workbook item and publish.
+ overwrite_true = TSC.Server.PublishMode.Overwrite
if default_project is not None:
new_workbook = TSC.WorkbookItem(default_project.id)
if args.as_job:
diff --git a/samples/query_permissions.py b/samples/query_permissions.py
index a253adc9a..0909f915d 100644
--- a/samples/query_permissions.py
+++ b/samples/query_permissions.py
@@ -1,13 +1,12 @@
####
# This script demonstrates how to query for permissions using TSC
-# To run the script, you must have installed Python 3.5 and later.
+# To run the script, you must have installed Python 3.6 or later.
#
# Example usage: 'python query_permissions.py -s https://10ax.online.tableau.com --site
# devSite123 -p <token-name> -v <token-value> workbook b4065286-80f0-11ea-af1b-cb7191f48e45'
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -15,30 +14,27 @@
def main():
parser = argparse.ArgumentParser(description='Query permissions of a given resource.')
- parser.add_argument('--server', '-s', required=True, help='Server address')
- parser.add_argument('--username', '-u', required=True, help='Username to sign into server')
- parser.add_argument('--site', '-S', default=None, help='Site to sign into - default site if not provided')
- parser.add_argument('-p', default=None, help='Password to sign into server')
-
+ # Common options; please keep those in sync across all samples
+ parser.add_argument('--server', '-s', required=True, help='server address')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
-
+ # Options specific to this sample
parser.add_argument('resource_type', choices=['workbook', 'datasource', 'flow', 'table', 'database'])
parser.add_argument('resource_id')
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Sign in
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
diff --git a/samples/refresh.py b/samples/refresh.py
index 96937a6e3..3eed5b4be 100644
--- a/samples/refresh.py
+++ b/samples/refresh.py
@@ -1,11 +1,10 @@
####
# This script demonstrates how to trigger a refresh on a datasource or workbook
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -13,30 +12,26 @@
def main():
parser = argparse.ArgumentParser(description='Trigger a refresh task on a workbook or datasource.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('--password', '-p', default=None, help='if not specified, you will be prompted')
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
-
+ # Options specific to this sample
parser.add_argument('resource_type', choices=['workbook', 'datasource'])
parser.add_argument('resource_id')
args = parser.parse_args()
- if args.password is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.password
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- # SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
if args.resource_type == "workbook":
@@ -44,16 +39,19 @@ def main():
resource = server.workbooks.get_by_id(args.resource_id)
# trigger the refresh, you'll get a job id back which can be used to poll for when the refresh is done
- results = server.workbooks.refresh(args.resource_id)
+ job = server.workbooks.refresh(args.resource_id)
else:
# Get the datasource by its Id to make sure it exists
resource = server.datasources.get_by_id(args.resource_id)
# trigger the refresh, you'll get a job id back which can be used to poll for when the refresh is done
- results = server.datasources.refresh(resource)
-
- print(results)
- # TODO: Add a flag that will poll and wait for the returned job to be done
+ job = server.datasources.refresh(resource)
+
+ print(f"Update job posted (ID: {job.id})")
+ print("Waiting for job...")
+ # `wait_for_job` will throw if the job isn't executed successfully
+ job = server.jobs.wait_for_job(job)
+        print("Job finished successfully")
if __name__ == '__main__':
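A hedged aside on the new polling flow used in this sample: `wait_for_job` also accepts an optional `timeout` (in seconds). The server address, token, and workbook LUID below are placeholders.

import tableauserverclient as TSC

tableau_auth = TSC.PersonalAccessTokenAuth("my-token-name", "my-token-value", site_id="mysite")
server = TSC.Server("https://tableau.example.com", use_server_version=True)
with server.auth.sign_in(tableau_auth):
    job = server.workbooks.refresh("00000000-0000-0000-0000-000000000000")
    # Raises TimeoutError after 15 minutes, and JobFailedException or
    # JobCancelledException if the refresh does not succeed.
    job = server.jobs.wait_for_job(job, timeout=900)
    print("Refresh finished with finish code", job.finish_code)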
diff --git a/samples/refresh_tasks.py b/samples/refresh_tasks.py
index f722adb30..bf69d064a 100644
--- a/samples/refresh_tasks.py
+++ b/samples/refresh_tasks.py
@@ -2,11 +2,10 @@
# This script demonstrates how to use the Tableau Server Client
# to query extract refresh tasks and run them as needed.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -30,14 +29,16 @@ def handle_info(server, args):
def main():
parser = argparse.ArgumentParser(description='Get all of the refresh tasks available on a server')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('-p', default=None)
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
-
+ # Options specific to this sample
subcommands = parser.add_subparsers()
list_arguments = subcommands.add_parser('list')
@@ -53,19 +54,13 @@ def main():
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
- server = TSC.Server(args.server)
- server.version = '2.6'
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
args.func(server, args)
diff --git a/samples/set_http_options.py b/samples/set_http_options.py
index 9316dfdde..40ed9167e 100644
--- a/samples/set_http_options.py
+++ b/samples/set_http_options.py
@@ -2,11 +2,10 @@
# This script demonstrates how to set http options. It will set the option
# to not verify SSL certificate, and query all workbooks on site.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -15,21 +14,26 @@
def main():
parser = argparse.ArgumentParser(description='List workbooks on site, with option set to ignore SSL verification.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ # This sample has no additional options, yet. If you add some, please add them here
args = parser.parse_args()
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Create required objects for sign in
- tableau_auth = TSC.TableauAuth(args.username, password)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server)
# Step 2: Set http options to disable verifying SSL
diff --git a/samples/set_refresh_schedule.py b/samples/set_refresh_schedule.py
index 2d4761560..862ea2372 100644
--- a/samples/set_refresh_schedule.py
+++ b/samples/set_refresh_schedule.py
@@ -2,12 +2,11 @@
# This script demonstrates how to set the refresh schedule for
# a workbook or datasource.
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -15,11 +14,16 @@
def usage(args):
parser = argparse.ArgumentParser(description='Set refresh schedule for a workbook or datasource.')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
- parser.add_argument('--password', '-p', default=None)
+ # Options specific to this sample
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--workbook', '-w')
group.add_argument('--datasource', '-d')
@@ -61,18 +65,13 @@ def assign_to_schedule(server, workbook_or_datasource, schedule):
def run(args):
- password = args.password
- if password is None:
- password = getpass.getpass("Password: ")
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
# Step 1: Sign in to server.
- tableau_auth = TSC.TableauAuth(args.username, password)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
-
with server.auth.sign_in(tableau_auth):
if args.workbook:
item = get_workbook_by_name(server, args.workbook)
diff --git a/samples/update_connection.py b/samples/update_connection.py
index 3449441a4..0e87217e8 100644
--- a/samples/update_connection.py
+++ b/samples/update_connection.py
@@ -1,11 +1,10 @@
####
# This script demonstrates how to update a connection's credentials on a server to embed the credentials
#
-# To run the script, you must have installed Python 3.5 or later.
+# To run the script, you must have installed Python 3.6 or later.
####
import argparse
-import getpass
import logging
import tableauserverclient as TSC
@@ -13,14 +12,16 @@
def main():
parser = argparse.ArgumentParser(description='Update a connection on a datasource or workbook to embed credentials')
+ # Common options; please keep those in sync across all samples
parser.add_argument('--server', '-s', required=True, help='server address')
- parser.add_argument('--username', '-u', required=True, help='username to sign into server')
- parser.add_argument('--site', '-S', default=None)
- parser.add_argument('-p', default=None)
-
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
help='desired logging level (set to error by default)')
-
+ # Options specific to this sample
parser.add_argument('resource_type', choices=['workbook', 'datasource'])
parser.add_argument('resource_id')
parser.add_argument('connection_id')
@@ -29,17 +30,11 @@ def main():
args = parser.parse_args()
- if args.p is None:
- password = getpass.getpass("Password: ")
- else:
- password = args.p
-
# Set logging level based on user input, or error by default
logging_level = getattr(logging, args.logging_level.upper())
logging.basicConfig(level=logging_level)
- # SIGN IN
- tableau_auth = TSC.TableauAuth(args.username, password, args.site)
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
server = TSC.Server(args.server, use_server_version=True)
with server.auth.sign_in(tableau_auth):
endpoint = {
diff --git a/samples/update_datasource_data.py b/samples/update_datasource_data.py
new file mode 100644
index 000000000..74c8ea6fb
--- /dev/null
+++ b/samples/update_datasource_data.py
@@ -0,0 +1,78 @@
+####
+# This script demonstrates how to update the data within a published
+# live-to-Hyper datasource on server.
+#
+# The sample is hardcoded against the `World Indicators` dataset and
+# expects to receive the LUID of a published datasource containing
+# that data. To create such a published datasource, you can use:
+# ./publish_datasource.py --file ../test/assets/World\ Indicators.hyper
+# which will print you the LUID of the datasource.
+#
+# Before running this script, the datasource will contain a region `Europe`.
+# After running this script, that region will be gone.
+#
+####
+
+import argparse
+import uuid
+import logging
+
+import tableauserverclient as TSC
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Delete the `Europe` region from a published `World Indicators` datasource.')
+ # Common options; please keep those in sync across all samples
+ parser.add_argument('--server', '-s', required=True, help='server address')
+ parser.add_argument('--site', '-S', help='site name')
+ parser.add_argument('--token-name', '-p', required=True,
+ help='name of the personal access token used to sign into the server')
+ parser.add_argument('--token-value', '-v', required=True,
+ help='value of the personal access token used to sign into the server')
+ parser.add_argument('--logging-level', '-l', choices=['debug', 'info', 'error'], default='error',
+ help='desired logging level (set to error by default)')
+ # Options specific to this sample
+ parser.add_argument('datasource_id', help="The LUID of the `World Indicators` datasource")
+
+ args = parser.parse_args()
+
+ # Set logging level based on user input, or error by default
+ logging_level = getattr(logging, args.logging_level.upper())
+ logging.basicConfig(level=logging_level)
+
+ tableau_auth = TSC.PersonalAccessTokenAuth(args.token_name, args.token_value, site_id=args.site)
+ server = TSC.Server(args.server, use_server_version=True)
+ with server.auth.sign_in(tableau_auth):
+ # We use a unique `request_id` for every request.
+        # In case the submission of the update job fails, we won't know whether the job was submitted
+ # or not. It could be that the server received the request, changed the data, but then the
+ # network connection broke down.
+ # If you want to have a way to retry, e.g., inserts while making sure they aren't duplicated,
+ # you need to use `request_id` for that purpose.
+ # In our case, we don't care about retries. And the delete is idempotent anyway.
+ # Hence, we simply use a randomly generated request id.
+ request_id = str(uuid.uuid4())
+
+ # This action will delete all rows with `Region=Europe` from the published data source.
+ # Other actions (inserts, updates, ...) are also available. For more information see
+ # https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_how_to_update_data_to_hyper.htm
+ actions = [
+ {
+ "action": "delete",
+ "target-table": "Extract",
+ "target-schema": "Extract",
+ "condition": {"op": "eq", "target-col": "Region", "const": {"type": "string", "v": "Europe"}}
+ }
+ ]
+
+ job = server.datasources.update_hyper_data(args.datasource_id, request_id=request_id, actions=actions)
+
+ print(f"Update job posted (ID: {job.id})")
+ print("Waiting for job...")
+ # `wait_for_job` will throw if the job isn't executed successfully
+ job = server.jobs.wait_for_job(job)
+        print("Job finished successfully")
+
+
+if __name__ == '__main__':
+ main()
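The sample only issues a `delete` action. As a sketch, and not part of the sample, an `insert` action can copy rows from an uploaded payload file into the published extract; the table and schema names and the local file name below are assumptions based on the REST API page linked in the sample, and `server` is assumed to be signed in as above.

insert_actions = [
    {
        "action": "insert",
        "source-schema": "Extract",   # table inside the uploaded payload .hyper file
        "source-table": "Extract",
        "target-schema": "Extract",   # table inside the published datasource
        "target-table": "Extract",
    }
]
job = server.datasources.update_hyper_data(
    "00000000-0000-0000-0000-000000000000",   # placeholder datasource LUID
    request_id=str(uuid.uuid4()),
    actions=insert_actions,
    payload="rows_to_insert.hyper",           # hypothetical local payload file
)
job = server.jobs.wait_for_job(job)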
diff --git a/tableauserverclient/__init__.py b/tableauserverclient/__init__.py
index fcce4e0c7..2ad65d71e 100644
--- a/tableauserverclient/__init__.py
+++ b/tableauserverclient/__init__.py
@@ -34,6 +34,7 @@
FlowItem,
WebhookItem,
PersonalAccessTokenAuth,
+ FlowRunItem
)
from .server import (
RequestOptions,
diff --git a/tableauserverclient/exponential_backoff.py b/tableauserverclient/exponential_backoff.py
new file mode 100644
index 000000000..2b3ded109
--- /dev/null
+++ b/tableauserverclient/exponential_backoff.py
@@ -0,0 +1,30 @@
+import time
+
+# Polling for server-side events (such as job completion) uses exponential backoff for the sleep intervals between polls
+ASYNC_POLL_MIN_INTERVAL=0.5
+ASYNC_POLL_MAX_INTERVAL=30
+ASYNC_POLL_BACKOFF_FACTOR=1.4
+
+
+class ExponentialBackoffTimer():
+ def __init__(self, *, timeout=None):
+ self.start_time = time.time()
+ self.timeout = timeout
+ self.current_sleep_interval = ASYNC_POLL_MIN_INTERVAL
+
+ def sleep(self):
+ max_sleep_time = ASYNC_POLL_MAX_INTERVAL
+ if self.timeout is not None:
+ elapsed = (time.time() - self.start_time)
+ if elapsed >= self.timeout:
+ raise TimeoutError(f"Timeout after {elapsed} seconds waiting for asynchronous event")
+ remaining_time = self.timeout - elapsed
+ # Usually, we would sleep for `ASYNC_POLL_MAX_INTERVAL`, but we don't want to sleep over the timeout
+ max_sleep_time = min(ASYNC_POLL_MAX_INTERVAL, remaining_time)
+ # We want to sleep at least for `ASYNC_POLL_MIN_INTERVAL`. This is important to ensure that, as we get
+ # closer to the timeout, we don't accidentally wake up multiple times and hit the server in rapid succession
+            # due to waking up too early from the `sleep`.
+ max_sleep_time = max(max_sleep_time, ASYNC_POLL_MIN_INTERVAL)
+
+ time.sleep(min(self.current_sleep_interval, max_sleep_time))
+ self.current_sleep_interval *= ASYNC_POLL_BACKOFF_FACTOR
\ No newline at end of file
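A minimal sketch of how this timer is intended to be used by polling code; the `check` callable is hypothetical.

from tableauserverclient.exponential_backoff import ExponentialBackoffTimer

def poll_until(check, *, timeout=None):
    # Call `check()` until it returns a truthy value, backing off between attempts
    backoff = ExponentialBackoffTimer(timeout=timeout)
    result = check()
    while not result:
        backoff.sleep()  # raises TimeoutError once `timeout` is exceeded
        result = check()
    return result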
diff --git a/tableauserverclient/models/__init__.py b/tableauserverclient/models/__init__.py
index c0ddc2e75..e5945782d 100644
--- a/tableauserverclient/models/__init__.py
+++ b/tableauserverclient/models/__init__.py
@@ -10,6 +10,7 @@
from .favorites_item import FavoriteItem
from .group_item import GroupItem
from .flow_item import FlowItem
+from .flow_run_item import FlowRunItem
from .interval_item import (
IntervalItem,
DailyInterval,
diff --git a/tableauserverclient/models/datasource_item.py b/tableauserverclient/models/datasource_item.py
index 78c2a44ca..5b23341d0 100644
--- a/tableauserverclient/models/datasource_item.py
+++ b/tableauserverclient/models/datasource_item.py
@@ -24,6 +24,7 @@ def __init__(self, project_id, name=None):
self._content_url = None
self._created_at = None
self._datasource_type = None
+ self._description = None
self._encrypt_extracts = None
self._has_extracts = None
self._id = None
@@ -131,6 +132,14 @@ def project_name(self):
def datasource_type(self):
return self._datasource_type
+ @property
+ def description(self):
+ return self._description
+
+ @description.setter
+ def description(self, value):
+ self._description = value
+
@property
def updated_at(self):
return self._updated_at
@@ -314,6 +323,15 @@ def from_response(cls, resp, ns):
@staticmethod
def _parse_element(datasource_xml, ns):
+ id_ = datasource_xml.get('id', None)
+ name = datasource_xml.get('name', None)
+ datasource_type = datasource_xml.get('type', None)
+ description = datasource_xml.get('description', None)
+ content_url = datasource_xml.get('contentUrl', None)
+ created_at = parse_datetime(datasource_xml.get('createdAt', None))
+ updated_at = parse_datetime(datasource_xml.get('updatedAt', None))
+ certification_note = datasource_xml.get('certificationNote', None)
+ certified = str(datasource_xml.get('isCertified', None)).lower() == 'true'
certification_note = datasource_xml.get("certificationNote", None)
certified = str(datasource_xml.get("isCertified", None)).lower() == "true"
content_url = datasource_xml.get("contentUrl", None)
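A hedged sketch of reading the new field; the LUID is a placeholder and `server` is assumed to be signed in. Whether a changed description is persisted by `datasources.update` depends on the request factory, so only the read is shown.

datasource = server.datasources.get_by_id("00000000-0000-0000-0000-000000000000")
print(datasource.name, datasource.description)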
diff --git a/tableauserverclient/models/flow_run_item.py b/tableauserverclient/models/flow_run_item.py
new file mode 100644
index 000000000..251c667b1
--- /dev/null
+++ b/tableauserverclient/models/flow_run_item.py
@@ -0,0 +1,106 @@
+import xml.etree.ElementTree as ET
+from ..datetime_helpers import parse_datetime
+import itertools
+
+
+class FlowRunItem(object):
+ def __init__(self) -> None:
+ self._id=None
+ self._flow_id=None
+ self._status=None
+ self._started_at=None
+ self._completed_at=None
+ self._progress=None
+ self._background_job_id=None
+
+
+ @property
+ def id(self):
+ return self._id
+
+
+ @property
+ def flow_id(self):
+ return self._flow_id
+
+
+ @property
+ def status(self):
+ return self._status
+
+
+ @property
+ def started_at(self):
+ return self._started_at
+
+
+ @property
+ def completed_at(self):
+ return self._completed_at
+
+
+ @property
+ def progress(self):
+ return self._progress
+
+
+ @property
+ def background_job_id(self):
+ return self._background_job_id
+
+
+ def _set_values(
+ self,
+ id,
+ flow_id,
+ status,
+ started_at,
+ completed_at,
+ progress,
+ background_job_id,
+ ):
+ if id is not None:
+ self._id = id
+ if flow_id is not None:
+ self._flow_id = flow_id
+ if status is not None:
+ self._status = status
+ if started_at is not None:
+ self._started_at = started_at
+ if completed_at is not None:
+ self._completed_at = completed_at
+ if progress is not None:
+ self._progress = progress
+ if background_job_id is not None:
+ self._background_job_id = background_job_id
+
+
+ @classmethod
+ def from_response(cls, resp, ns):
+ all_flowrun_items = list()
+ parsed_response = ET.fromstring(resp)
+ all_flowrun_xml = itertools.chain(
+ parsed_response.findall(".//t:flowRun[@id]", namespaces=ns),
+ parsed_response.findall(".//t:flowRuns[@id]", namespaces=ns)
+ )
+
+ for flowrun_xml in all_flowrun_xml:
+ parsed = cls._parse_element(flowrun_xml, ns)
+ flowrun_item = cls()
+ flowrun_item._set_values(**parsed)
+ all_flowrun_items.append(flowrun_item)
+ return all_flowrun_items
+
+
+ @staticmethod
+ def _parse_element(flowrun_xml, ns):
+ result = {}
+ result['id'] = flowrun_xml.get("id", None)
+ result['flow_id'] = flowrun_xml.get("flowId", None)
+ result['status'] = flowrun_xml.get("status", None)
+ result['started_at'] = parse_datetime(flowrun_xml.get("startedAt", None))
+ result['completed_at'] = parse_datetime(flowrun_xml.get("completedAt", None))
+ result['progress'] = flowrun_xml.get("progress", None)
+ result['background_job_id'] = flowrun_xml.get("backgroundJobId", None)
+
+ return result
diff --git a/tableauserverclient/models/job_item.py b/tableauserverclient/models/job_item.py
index 7a3a50861..8c21b24e6 100644
--- a/tableauserverclient/models/job_item.py
+++ b/tableauserverclient/models/job_item.py
@@ -1,8 +1,19 @@
import xml.etree.ElementTree as ET
+from .flow_run_item import FlowRunItem
from ..datetime_helpers import parse_datetime
class JobItem(object):
+ class FinishCode:
+ """
+ Status codes as documented on
+ https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref_jobs_tasks_and_schedules.htm#query_job
+ """
+ Success = 0
+ Failed = 1
+ Cancelled = 2
+
+
def __init__(
self,
id_,
@@ -14,6 +25,7 @@ def __init__(
finish_code=0,
notes=None,
mode=None,
+ flow_run=None,
):
self._id = id_
self._type = job_type
@@ -24,6 +36,7 @@ def __init__(
self._finish_code = finish_code
self._notes = notes or []
self._mode = mode
+ self._flow_run = flow_run
@property
def id(self):
@@ -66,6 +79,14 @@ def mode(self, value):
# check for valid data here
self._mode = value
+ @property
+ def flow_run(self):
+ return self._flow_run
+
+ @flow_run.setter
+ def flow_run(self, value):
+ self._flow_run = value
+
    def __repr__(self):
        return "<Job ...>"

diff --git a/tableauserverclient/server/endpoint/datasources_endpoint.py b/tableauserverclient/server/endpoint/datasources_endpoint.py
--- a/tableauserverclient/server/endpoint/datasources_endpoint.py
+++ b/tableauserverclient/server/endpoint/datasources_endpoint.py
        if ... >= FILESIZE_LIMIT:
logger.info("Publishing {0} to server with chunking method (datasource over 64MB)".format(filename))
- upload_session_id = Fileuploads.upload_chunks(self.parent_srv, file)
+ upload_session_id = self.parent_srv.fileuploads.upload(file)
url = "{0}&uploadSessionId={1}".format(url, upload_session_id)
xml_request, content_type = RequestFactory.Datasource.publish_req_chunked(
datasource_item, connection_credentials, connections
@@ -282,10 +282,34 @@ def publish(
new_datasource = DatasourceItem.from_response(server_response.content, self.parent_srv.namespace)[0]
logger.info("Published {0} (ID: {1})".format(filename, new_datasource.id))
return new_datasource
- server_response = self.post_request(url, xml_request, content_type)
- new_datasource = DatasourceItem.from_response(server_response.content, self.parent_srv.namespace)[0]
- logger.info("Published {0} (ID: {1})".format(filename, new_datasource.id))
- return new_datasource
+
+ @api(version="3.13")
+ def update_hyper_data(self, datasource_or_connection_item, *, request_id, actions, payload = None):
+ if isinstance(datasource_or_connection_item, DatasourceItem):
+ datasource_id = datasource_or_connection_item.id
+ url = "{0}/{1}/data".format(self.baseurl, datasource_id)
+ elif isinstance(datasource_or_connection_item, ConnectionItem):
+ datasource_id = datasource_or_connection_item.datasource_id
+ connection_id = datasource_or_connection_item.id
+ url = "{0}/{1}/connections/{2}/data".format(self.baseurl, datasource_id, connection_id)
+ else:
+ assert isinstance(datasource_or_connection_item, str)
+ url = "{0}/{1}/data".format(self.baseurl, datasource_or_connection_item)
+
+ if payload is not None:
+ if not os.path.isfile(payload):
+ error = "File path does not lead to an existing file."
+ raise IOError(error)
+
+ logger.info("Uploading {0} to server with chunking method for Update job".format(payload))
+ upload_session_id = self.parent_srv.fileuploads.upload(payload)
+ url = "{0}?uploadSessionId={1}".format(url, upload_session_id)
+
+ json_request = json.dumps({"actions": actions})
+ parameters = {"headers": {"requestid": request_id}}
+ server_response = self.patch_request(url, json_request, "application/json", parameters=parameters)
+ new_job = JobItem.from_response(server_response.content, self.parent_srv.namespace)[0]
+ return new_job
@api(version="2.0")
def populate_permissions(self, item):
diff --git a/tableauserverclient/server/endpoint/endpoint.py b/tableauserverclient/server/endpoint/endpoint.py
index c7be8fc77..31291abc9 100644
--- a/tableauserverclient/server/endpoint/endpoint.py
+++ b/tableauserverclient/server/endpoint/endpoint.py
@@ -55,7 +55,9 @@ def _make_request(
):
parameters = parameters or {}
parameters.update(self.parent_srv.http_options)
- parameters["headers"] = Endpoint._make_common_headers(auth_token, content_type)
+        if "headers" not in parameters:
+ parameters["headers"] = {}
+ parameters["headers"].update(Endpoint._make_common_headers(auth_token, content_type))
if content is not None:
parameters["data"] = content
@@ -118,22 +120,34 @@ def delete_request(self, url):
# We don't return anything for a delete
self._make_request(self.parent_srv.session.delete, url, auth_token=self.parent_srv.auth_token)
- def put_request(self, url, xml_request=None, content_type="text/xml"):
+ def put_request(self, url, xml_request=None, content_type="text/xml", parameters=None):
return self._make_request(
self.parent_srv.session.put,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
+ parameters=parameters,
)
- def post_request(self, url, xml_request, content_type="text/xml"):
+ def post_request(self, url, xml_request, content_type="text/xml", parameters=None):
return self._make_request(
self.parent_srv.session.post,
url,
content=xml_request,
auth_token=self.parent_srv.auth_token,
content_type=content_type,
+ parameters=parameters,
+ )
+
+ def patch_request(self, url, xml_request, content_type="text/xml", parameters=None):
+ return self._make_request(
+ self.parent_srv.session.patch,
+ url,
+ content=xml_request,
+ auth_token=self.parent_srv.auth_token,
+ content_type=content_type,
+ parameters=parameters,
)
diff --git a/tableauserverclient/server/endpoint/exceptions.py b/tableauserverclient/server/endpoint/exceptions.py
index 9a9a81d77..48dcaf4c8 100644
--- a/tableauserverclient/server/endpoint/exceptions.py
+++ b/tableauserverclient/server/endpoint/exceptions.py
@@ -64,3 +64,27 @@ def __str__(self):
from pprint import pformat
return pformat(self.error)
+
+
+class JobFailedException(Exception):
+ def __init__(self, job):
+ self.notes = job.notes
+ self.job = job
+
+ def __str__(self):
+ return f"Job {self.job.id} failed with notes {self.notes}"
+
+
+class JobCancelledException(JobFailedException):
+ pass
+class FlowRunFailedException(Exception):
+ def __init__(self, flow_run):
+ self.background_job_id = flow_run.background_job_id
+ self.flow_run = flow_run
+
+ def __str__(self):
+ return f"FlowRun {self.flow_run.id} failed with job id {self.background_job_id}"
+
+
+class FlowRunCancelledException(FlowRunFailedException):
+ pass
diff --git a/tableauserverclient/server/endpoint/fileuploads_endpoint.py b/tableauserverclient/server/endpoint/fileuploads_endpoint.py
index 05a3ce17c..b70cffbaa 100644
--- a/tableauserverclient/server/endpoint/fileuploads_endpoint.py
+++ b/tableauserverclient/server/endpoint/fileuploads_endpoint.py
@@ -14,7 +14,6 @@
class Fileuploads(Endpoint):
def __init__(self, parent_srv):
super(Fileuploads, self).__init__(parent_srv)
- self.upload_id = ""
@property
def baseurl(self):
@@ -25,21 +24,18 @@ def initiate(self):
url = self.baseurl
server_response = self.post_request(url, "")
fileupload_item = FileuploadItem.from_response(server_response.content, self.parent_srv.namespace)
- self.upload_id = fileupload_item.upload_session_id
- logger.info("Initiated file upload session (ID: {0})".format(self.upload_id))
- return self.upload_id
+ upload_id = fileupload_item.upload_session_id
+ logger.info("Initiated file upload session (ID: {0})".format(upload_id))
+ return upload_id
@api(version="2.0")
- def append(self, xml_request, content_type):
- if not self.upload_id:
- error = "File upload session must be initiated first."
- raise MissingRequiredFieldError(error)
- url = "{0}/{1}".format(self.baseurl, self.upload_id)
- server_response = self.put_request(url, xml_request, content_type)
- logger.info("Uploading a chunk to session (ID: {0})".format(self.upload_id))
+ def append(self, upload_id, data, content_type):
+ url = "{0}/{1}".format(self.baseurl, upload_id)
+ server_response = self.put_request(url, data, content_type)
+ logger.info("Uploading a chunk to session (ID: {0})".format(upload_id))
return FileuploadItem.from_response(server_response.content, self.parent_srv.namespace)
- def read_chunks(self, file):
+ def _read_chunks(self, file):
file_opened = False
try:
file_content = open(file, "rb")
@@ -47,23 +43,21 @@ def read_chunks(self, file):
except TypeError:
file_content = file
- while True:
- chunked_content = file_content.read(CHUNK_SIZE)
- if not chunked_content:
- if file_opened:
- file_content.close()
- break
- yield chunked_content
-
- @classmethod
- def upload_chunks(cls, parent_srv, file):
- file_uploader = cls(parent_srv)
- upload_id = file_uploader.initiate()
-
- chunks = file_uploader.read_chunks(file)
- for chunk in chunks:
- xml_request, content_type = RequestFactory.Fileupload.chunk_req(chunk)
- fileupload_item = file_uploader.append(xml_request, content_type)
+ try:
+ while True:
+ chunked_content = file_content.read(CHUNK_SIZE)
+ if not chunked_content:
+ break
+ yield chunked_content
+ finally:
+ if file_opened:
+ file_content.close()
+
+ def upload(self, file):
+ upload_id = self.initiate()
+ for chunk in self._read_chunks(file):
+ request, content_type = RequestFactory.Fileupload.chunk_req(chunk)
+ fileupload_item = self.append(upload_id, request, content_type)
logger.info("\tPublished {0}MB".format(fileupload_item.file_size))
- logger.info("\tCommitting file upload...")
+ logger.info("File upload finished (ID: {0})".format(upload_id))
return upload_id
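With the per-instance `upload_id` state gone, the chunked upload can be driven through an endpoint instance, which this diff later exposes as `server.fileuploads`; the file name below is a placeholder.

upload_session_id = server.fileuploads.upload("large_extract.hyper")
print("Upload session id:", upload_session_id)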
diff --git a/tableauserverclient/server/endpoint/flow_runs_endpoint.py b/tableauserverclient/server/endpoint/flow_runs_endpoint.py
new file mode 100644
index 000000000..2ae1973d4
--- /dev/null
+++ b/tableauserverclient/server/endpoint/flow_runs_endpoint.py
@@ -0,0 +1,76 @@
+from .endpoint import Endpoint, QuerysetEndpoint, api
+from .exceptions import FlowRunFailedException, FlowRunCancelledException
+from .. import FlowRunItem, PaginationItem
+from ...exponential_backoff import ExponentialBackoffTimer
+
+import logging
+
+logger = logging.getLogger("tableau.endpoint.flowruns")
+
+
+class FlowRuns(QuerysetEndpoint):
+ def __init__(self, parent_srv):
+ super(FlowRuns, self).__init__(parent_srv)
+
+ @property
+ def baseurl(self):
+ return "{0}/sites/{1}/flows/runs".format(self.parent_srv.baseurl, self.parent_srv.site_id)
+
+    # Get all flow runs
+ @api(version="3.10")
+ def get(self, req_options=None):
+ logger.info("Querying all flow runs on site")
+ url = self.baseurl
+ server_response = self.get_request(url, req_options)
+ pagination_item = PaginationItem.from_response(server_response.content, self.parent_srv.namespace)
+ all_flow_run_items = FlowRunItem.from_response(server_response.content, self.parent_srv.namespace)
+ return all_flow_run_items, pagination_item
+
+    # Get 1 flow run by id
+ @api(version="3.10")
+ def get_by_id(self, flow_run_id):
+ if not flow_run_id:
+            error = "Flow run ID undefined."
+ raise ValueError(error)
+        logger.info("Querying single flow run (ID: {0})".format(flow_run_id))
+ url = "{0}/{1}".format(self.baseurl, flow_run_id)
+ server_response = self.get_request(url)
+ return FlowRunItem.from_response(server_response.content, self.parent_srv.namespace)[0]
+
+
+ # Cancel 1 flow run by id
+ @api(version="3.10")
+ def cancel(self, flow_run_id):
+ if not flow_run_id:
+            error = "Flow run ID undefined."
+ raise ValueError(error)
+ id_ = getattr(flow_run_id, 'id', flow_run_id)
+ url = "{0}/{1}".format(self.baseurl, id_)
+ self.put_request(url)
+        logger.info("Cancelled flow run (ID: {0})".format(id_))
+
+
+ @api(version="3.10")
+ def wait_for_job(self, flow_run_id, *, timeout=None):
+ if isinstance(flow_run_id, FlowRunItem):
+ flow_run_id = flow_run_id.id
+ assert isinstance(flow_run_id, str)
+ logger.debug(f"Waiting for flow run {flow_run_id}")
+
+ backoffTimer = ExponentialBackoffTimer(timeout=timeout)
+ flow_run = self.get_by_id(flow_run_id)
+ while flow_run.completed_at is None:
+ backoffTimer.sleep()
+ flow_run = self.get_by_id(flow_run_id)
+ logger.debug(f"\tFlowRun {flow_run_id} progress={flow_run.progress}")
+
+ logger.info("FlowRun {} Completed: Status: {}".format(flow_run_id, flow_run.status))
+
+ if flow_run.status == "Success":
+ return flow_run
+ elif flow_run.status == "Failed":
+ raise FlowRunFailedException(flow_run)
+ elif flow_run.status == "Cancelled":
+ raise FlowRunCancelledException(flow_run)
+ else:
+ raise AssertionError("Unexpected status in flow_run", flow_run)
\ No newline at end of file
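A hedged usage sketch for the new endpoint, assuming `server` is already signed in.

flow_runs, pagination_item = server.flow_runs.get()
for run in flow_runs:
    print(run.id, run.flow_id, run.status, run.progress)

if flow_runs:
    # Blocks until the first run completes; raises FlowRunFailedException or
    # FlowRunCancelledException if it does not succeed.
    finished_run = server.flow_runs.wait_for_job(flow_runs[0].id, timeout=1800)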
diff --git a/tableauserverclient/server/endpoint/flows_endpoint.py b/tableauserverclient/server/endpoint/flows_endpoint.py
index 475166aad..eb2de4ac9 100644
--- a/tableauserverclient/server/endpoint/flows_endpoint.py
+++ b/tableauserverclient/server/endpoint/flows_endpoint.py
@@ -2,7 +2,6 @@
from .exceptions import InternalServerError, MissingRequiredFieldError
from .permissions_endpoint import _PermissionsEndpoint
from .dqw_endpoint import _DataQualityWarningEndpoint
-from .fileuploads_endpoint import Fileuploads
from .resource_tagger import _ResourceTagger
from .. import RequestFactory, FlowItem, PaginationItem, ConnectionItem
from ...filesys_helpers import to_filename, make_download_path
@@ -169,7 +168,7 @@ def publish(self, flow_item, file_path, mode, connections=None):
# Determine if chunking is required (64MB is the limit for single upload method)
if os.path.getsize(file_path) >= FILESIZE_LIMIT:
logger.info("Publishing {0} to server with chunking method (flow over 64MB)".format(filename))
- upload_session_id = Fileuploads.upload_chunks(self.parent_srv, file_path)
+ upload_session_id = self.parent_srv.fileuploads.upload(file_path)
url = "{0}&uploadSessionId={1}".format(url, upload_session_id)
xml_request, content_type = RequestFactory.Flow.publish_req_chunked(flow_item, connections)
else:
diff --git a/tableauserverclient/server/endpoint/jobs_endpoint.py b/tableauserverclient/server/endpoint/jobs_endpoint.py
index 6079ca788..4cdbcc5be 100644
--- a/tableauserverclient/server/endpoint/jobs_endpoint.py
+++ b/tableauserverclient/server/endpoint/jobs_endpoint.py
@@ -1,18 +1,13 @@
from .endpoint import Endpoint, api
+from .exceptions import JobCancelledException, JobFailedException
from .. import JobItem, BackgroundJobItem, PaginationItem
from ..request_options import RequestOptionsBase
+from ...exponential_backoff import ExponentialBackoffTimer
import logging
-try:
- basestring
-except NameError:
- # In case we are in python 3 the string check is different
- basestring = str
-
logger = logging.getLogger("tableau.endpoint.jobs")
-
class Jobs(Endpoint):
@property
def baseurl(self):
@@ -21,7 +16,7 @@ def baseurl(self):
@api(version="2.6")
def get(self, job_id=None, req_options=None):
# Backwards Compatibility fix until we rev the major version
- if job_id is not None and isinstance(job_id, basestring):
+ if job_id is not None and isinstance(job_id, str):
import warnings
warnings.warn("Jobs.get(job_id) is deprecated, update code to use Jobs.get_by_id(job_id)")
@@ -48,3 +43,28 @@ def get_by_id(self, job_id):
server_response = self.get_request(url)
new_job = JobItem.from_response(server_response.content, self.parent_srv.namespace)[0]
return new_job
+
+ @api(version="2.6")
+ def wait_for_job(self, job_id, *, timeout=None):
+ if isinstance(job_id, JobItem):
+ job_id = job_id.id
+ assert isinstance(job_id, str)
+ logger.debug(f"Waiting for job {job_id}")
+
+ backoffTimer = ExponentialBackoffTimer(timeout=timeout)
+ job = self.get_by_id(job_id)
+ while job.completed_at is None:
+ backoffTimer.sleep()
+ job = self.get_by_id(job_id)
+ logger.debug(f"\tJob {job_id} progress={job.progress}")
+
+ logger.info("Job {} Completed: Finish Code: {} - Notes:{}".format(job_id, job.finish_code, job.notes))
+
+ if job.finish_code == JobItem.FinishCode.Success:
+ return job
+ elif job.finish_code == JobItem.FinishCode.Failed:
+ raise JobFailedException(job)
+ elif job.finish_code == JobItem.FinishCode.Cancelled:
+ raise JobCancelledException(job)
+ else:
+ raise AssertionError("Unexpected finish_code in job", job)
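Callers that prefer reporting over raising can catch the new exception types; `job` is assumed to come from an earlier refresh or publish call. `JobCancelledException` subclasses `JobFailedException`, so it has to be caught first.

from tableauserverclient.server.endpoint.exceptions import (
    JobCancelledException,
    JobFailedException,
)

try:
    job = server.jobs.wait_for_job(job, timeout=900)
except JobCancelledException as error:
    print("Job was cancelled:", error)
except JobFailedException as error:
    print("Job failed:", error)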
diff --git a/tableauserverclient/server/endpoint/metadata_endpoint.py b/tableauserverclient/server/endpoint/metadata_endpoint.py
index 368a92a97..adc7b2666 100644
--- a/tableauserverclient/server/endpoint/metadata_endpoint.py
+++ b/tableauserverclient/server/endpoint/metadata_endpoint.py
@@ -57,8 +57,9 @@ def control_baseurl(self):
return "{0}/api/metadata/v1/control".format(self.parent_srv.server_address)
@api("3.5")
- def query(self, query, variables=None, abort_on_error=False):
+ def query(self, query, variables=None, abort_on_error=False, parameters=None):
logger.info("Querying Metadata API")
+
url = self.baseurl
try:
@@ -67,7 +68,7 @@ def query(self, query, variables=None, abort_on_error=False):
raise InvalidGraphQLQuery("Must provide a string")
        # Setting content type because post_request defaults to text/xml
- server_response = self.post_request(url, graphql_query, content_type="text/json")
+ server_response = self.post_request(url, graphql_query, content_type="application/json", parameters=parameters)
results = server_response.json()
if abort_on_error and results.get("errors", None):
@@ -112,7 +113,7 @@ def paginated_query(self, query, variables=None, abort_on_error=False):
paginated_results = results_dict["pages"]
# get first page
- server_response = self.post_request(url, graphql_query, content_type="text/json")
+ server_response = self.post_request(url, graphql_query, content_type="application/json")
results = server_response.json()
if abort_on_error and results.get("errors", None):
@@ -129,7 +130,7 @@ def paginated_query(self, query, variables=None, abort_on_error=False):
# make the call
logger.debug("Calling Token: " + cursor)
graphql_query = json.dumps({"query": query, "variables": variables})
- server_response = self.post_request(url, graphql_query, content_type="text/json")
+ server_response = self.post_request(url, graphql_query, content_type="application/json")
results = server_response.json()
# verify response
if abort_on_error and results.get("errors", None):
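A hedged sketch of the new `parameters` pass-through on metadata queries; the extra header and the GraphQL fields below are assumptions, not taken from this diff, and `server` is assumed to be signed in.

query = "query { publishedDatasources { name } }"
result = server.metadata.query(
    query,
    parameters={"headers": {"requestid": "my-trace-id"}},
)
for datasource in result["data"]["publishedDatasources"]:
    print(datasource["name"])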
diff --git a/tableauserverclient/server/endpoint/permissions_endpoint.py b/tableauserverclient/server/endpoint/permissions_endpoint.py
index 7035837f4..5013a0bef 100644
--- a/tableauserverclient/server/endpoint/permissions_endpoint.py
+++ b/tableauserverclient/server/endpoint/permissions_endpoint.py
@@ -44,7 +44,7 @@ def delete(self, resource, rules):
for rule in rules:
for capability, mode in rule.capabilities.items():
- " /permissions/groups/group-id/capability-name/capability-mode"
+ "/permissions/groups/group-id/capability-name/capability-mode"
url = "{0}/{1}/permissions/{2}/{3}/{4}/{5}".format(
self.owner_baseurl(),
resource.id,
diff --git a/tableauserverclient/server/endpoint/server_info_endpoint.py b/tableauserverclient/server/endpoint/server_info_endpoint.py
index 8776477d3..ca3715fca 100644
--- a/tableauserverclient/server/endpoint/server_info_endpoint.py
+++ b/tableauserverclient/server/endpoint/server_info_endpoint.py
@@ -17,7 +17,7 @@ def baseurl(self):
@api(version="2.4")
def get(self):
- """ Retrieve the server info for the server. This is an unauthenticated call """
+ """Retrieve the server info for the server. This is an unauthenticated call"""
try:
server_response = self.get_unauthenticated_request(self.baseurl)
except ServerResponseError as e:
diff --git a/tableauserverclient/server/endpoint/workbooks_endpoint.py b/tableauserverclient/server/endpoint/workbooks_endpoint.py
index df14674c6..a3f14c291 100644
--- a/tableauserverclient/server/endpoint/workbooks_endpoint.py
+++ b/tableauserverclient/server/endpoint/workbooks_endpoint.py
@@ -1,7 +1,6 @@
from .endpoint import QuerysetEndpoint, api, parameter_added_in
from .exceptions import InternalServerError, MissingRequiredFieldError
from .permissions_endpoint import _PermissionsEndpoint
-from .fileuploads_endpoint import Fileuploads
from .resource_tagger import _ResourceTagger
from .. import RequestFactory, WorkbookItem, ConnectionItem, ViewItem, PaginationItem
from ...models.job_item import JobItem
@@ -344,7 +343,7 @@ def publish(
# Determine if chunking is required (64MB is the limit for single upload method)
if file_size >= FILESIZE_LIMIT:
logger.info("Publishing {0} to server with chunking method (workbook over 64MB)".format(workbook_item.name))
- upload_session_id = Fileuploads.upload_chunks(self.parent_srv, file)
+ upload_session_id = self.parent_srv.fileuploads.upload(file)
url = "{0}&uploadSessionId={1}".format(url, upload_session_id)
conn_creds = connection_credentials
xml_request, content_type = RequestFactory.Workbook.publish_req_chunked(
diff --git a/tableauserverclient/server/request_options.py b/tableauserverclient/server/request_options.py
index 23d10b3d6..3047691a9 100644
--- a/tableauserverclient/server/request_options.py
+++ b/tableauserverclient/server/request_options.py
@@ -98,7 +98,7 @@ def get_query_params(self):
class _FilterOptionsBase(RequestOptionsBase):
- """ Provide a basic implementation of adding view filters to the url """
+ """Provide a basic implementation of adding view filters to the url"""
def __init__(self):
self.view_filters = []
@@ -182,6 +182,7 @@ class PageType:
Note = "note"
Quarto = "quarto"
Tabloid = "tabloid"
+ Unspecified = "unspecified"
class Orientation:
Portrait = "portrait"
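A hedged sketch of using the new page type when exporting a view to PDF; `view_item` is assumed to come from an earlier `server.views` call on a signed-in `server`.

import tableauserverclient as TSC

pdf_options = TSC.PDFRequestOptions(
    page_type=TSC.PDFRequestOptions.PageType.Unspecified,
    orientation=TSC.PDFRequestOptions.Orientation.Portrait,
)
server.views.populate_pdf(view_item, pdf_options)
with open("view.pdf", "wb") as pdf_file:
    pdf_file.write(view_item.pdf)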
diff --git a/tableauserverclient/server/server.py b/tableauserverclient/server/server.py
index 057c98877..56fc47849 100644
--- a/tableauserverclient/server/server.py
+++ b/tableauserverclient/server/server.py
@@ -24,6 +24,8 @@
DataAccelerationReport,
Favorites,
DataAlerts,
+ Fileuploads,
+ FlowRuns
)
from .endpoint.exceptions import (
EndpointUnavailableError,
@@ -82,7 +84,9 @@ def __init__(self, server_address, use_server_version=False):
self.webhooks = Webhooks(self)
self.data_acceleration_report = DataAccelerationReport(self)
self.data_alerts = DataAlerts(self)
+ self.fileuploads = Fileuploads(self)
self._namespace = Namespace()
+ self.flow_runs = FlowRuns(self)
if use_server_version:
self.use_server_version()
diff --git a/test/_utils.py b/test/_utils.py
index ecabf53a4..626838f23 100644
--- a/test/_utils.py
+++ b/test/_utils.py
@@ -1,3 +1,5 @@
+from contextlib import contextmanager
+import unittest
import os.path
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
@@ -14,3 +16,22 @@ def read_xml_asset(filename):
def read_xml_assets(*args):
return map(read_xml_asset, args)
+
+
+@contextmanager
+def mocked_time():
+ mock_time = 0
+
+ def sleep_mock(interval):
+ nonlocal mock_time
+ mock_time += interval
+
+ def get_time():
+ return mock_time
+
+ try:
+ patch = unittest.mock.patch
+ except AttributeError:
+ from unittest.mock import patch
+ with patch("time.sleep", sleep_mock), patch("time.time", get_time):
+ yield get_time
diff --git a/test/assets/datasource_data_update.xml b/test/assets/datasource_data_update.xml
new file mode 100644
index 000000000..305caaf0b
--- /dev/null
+++ b/test/assets/datasource_data_update.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+ 7ecaccd8-39b0-4875-a77d-094f6e930019
+
+
+
diff --git a/test/assets/datasource_get.xml b/test/assets/datasource_get.xml
index 5858d318d..951409caa 100644
--- a/test/assets/datasource_get.xml
+++ b/test/assets/datasource_get.xml
@@ -2,12 +2,12 @@
-
+
-
+
diff --git a/test/assets/datasource_get_by_id.xml b/test/assets/datasource_get_by_id.xml
index d5dcf89ee..53434b8cc 100644
--- a/test/assets/datasource_get_by_id.xml
+++ b/test/assets/datasource_get_by_id.xml
@@ -1,6 +1,6 @@
-
+
diff --git a/test/assets/flow_refresh.xml b/test/assets/flow_refresh.xml
new file mode 100644
index 000000000..b2bb97a5d
--- /dev/null
+++ b/test/assets/flow_refresh.xml
@@ -0,0 +1,11 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/test/assets/flow_runs_get.xml b/test/assets/flow_runs_get.xml
new file mode 100644
index 000000000..bdce4cdfb
--- /dev/null
+++ b/test/assets/flow_runs_get.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/test/assets/flow_runs_get_by_id.xml b/test/assets/flow_runs_get_by_id.xml
new file mode 100644
index 000000000..3a768fab4
--- /dev/null
+++ b/test/assets/flow_runs_get_by_id.xml
@@ -0,0 +1,10 @@
+
+
+
+
\ No newline at end of file
diff --git a/test/assets/flow_runs_get_by_id_failed.xml b/test/assets/flow_runs_get_by_id_failed.xml
new file mode 100644
index 000000000..9e766680b
--- /dev/null
+++ b/test/assets/flow_runs_get_by_id_failed.xml
@@ -0,0 +1,10 @@
+
+
+
+
\ No newline at end of file
diff --git a/test/assets/flow_runs_get_by_id_inprogress.xml b/test/assets/flow_runs_get_by_id_inprogress.xml
new file mode 100644
index 000000000..42e1a77f9
--- /dev/null
+++ b/test/assets/flow_runs_get_by_id_inprogress.xml
@@ -0,0 +1,10 @@
+
+
+
+
\ No newline at end of file
diff --git a/test/assets/job_get_by_id_failed.xml b/test/assets/job_get_by_id_failed.xml
new file mode 100644
index 000000000..c7456008e
--- /dev/null
+++ b/test/assets/job_get_by_id_failed.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+ c569ee62-9204-416f-843d-5ccfebc0231b
+
+
+
\ No newline at end of file
diff --git a/test/assets/job_get_by_id_inprogress.xml b/test/assets/job_get_by_id_inprogress.xml
new file mode 100644
index 000000000..7a23fb99d
--- /dev/null
+++ b/test/assets/job_get_by_id_inprogress.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+ c569ee62-9204-416f-843d-5ccfebc0231b
+
+
+
\ No newline at end of file
diff --git a/test/assets/workbook_get_by_id_personal.xml b/test/assets/workbook_get_by_id_personal.xml
new file mode 100644
index 000000000..90cc65e73
--- /dev/null
+++ b/test/assets/workbook_get_by_id_personal.xml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/test/test_datasource.py b/test/test_datasource.py
index e221f0c88..52a5eabe3 100644
--- a/test/test_datasource.py
+++ b/test/test_datasource.py
@@ -1,3 +1,4 @@
+from tableauserverclient.server.endpoint.fileuploads_endpoint import Fileuploads
import unittest
from io import BytesIO
import os
@@ -22,6 +23,7 @@
PUBLISH_XML_ASYNC = 'datasource_publish_async.xml'
REFRESH_XML = 'datasource_refresh.xml'
UPDATE_XML = 'datasource_update.xml'
+UPDATE_HYPER_DATA_XML = 'datasource_data_update.xml'
UPDATE_CONNECTION_XML = 'datasource_connection_update.xml'
@@ -44,6 +46,7 @@ def test_get(self):
self.assertEqual(2, pagination_item.total_available)
self.assertEqual('e76a1461-3b1d-4588-bf1b-17551a879ad9', all_datasources[0].id)
self.assertEqual('dataengine', all_datasources[0].datasource_type)
+ self.assertEqual('SampleDsDescription', all_datasources[0].description)
self.assertEqual('SampleDS', all_datasources[0].content_url)
self.assertEqual('2016-08-11T21:22:40Z', format_datetime(all_datasources[0].created_at))
self.assertEqual('2016-08-11T21:34:17Z', format_datetime(all_datasources[0].updated_at))
@@ -58,6 +61,7 @@ def test_get(self):
self.assertEqual('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', all_datasources[1].id)
self.assertEqual('dataengine', all_datasources[1].datasource_type)
+ self.assertEqual('description Sample', all_datasources[1].description)
self.assertEqual('Sampledatasource', all_datasources[1].content_url)
self.assertEqual('2016-08-04T21:31:55Z', format_datetime(all_datasources[1].created_at))
self.assertEqual('2016-08-04T21:31:55Z', format_datetime(all_datasources[1].updated_at))
@@ -92,6 +96,7 @@ def test_get_by_id(self):
self.assertEqual('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb', single_datasource.id)
self.assertEqual('dataengine', single_datasource.datasource_type)
+ self.assertEqual('abc description xyz', single_datasource.description)
self.assertEqual('Sampledatasource', single_datasource.content_url)
self.assertEqual('2016-08-04T21:31:55Z', format_datetime(single_datasource.created_at))
self.assertEqual('2016-08-04T21:31:55Z', format_datetime(single_datasource.updated_at))
@@ -100,7 +105,6 @@ def test_get_by_id(self):
self.assertEqual('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', single_datasource.project_id)
self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_datasource.owner_id)
self.assertEqual(set(['world', 'indicators', 'sample']), single_datasource.tags)
- self.assertEqual("test-ds", single_datasource.description)
self.assertEqual(TSC.DatasourceItem.AskDataEnablement.SiteDefault, single_datasource.ask_data_enablement)
def test_update(self):
@@ -315,7 +319,7 @@ def test_publish_async(self):
self.assertEqual('PublishDatasource', new_job.type)
self.assertEqual('0', new_job.progress)
self.assertEqual('2018-06-30T00:54:54Z', format_datetime(new_job.created_at))
- self.assertEqual('1', new_job.finish_code)
+ self.assertEqual(1, new_job.finish_code)
def test_publish_unnamed_file_object(self):
new_datasource = TSC.DatasourceItem('test')
@@ -333,7 +337,13 @@ def test_refresh_id(self):
with requests_mock.mock() as m:
m.post(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/refresh',
status_code=202, text=response_xml)
- self.server.datasources.refresh('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb')
+ new_job = self.server.datasources.refresh('9dbd2263-16b5-46e1-9c43-a76bb8ab65fb')
+
+ self.assertEqual('7c3d599e-949f-44c3-94a1-f30ba85757e4', new_job.id)
+ self.assertEqual('RefreshExtract', new_job.type)
+ self.assertEqual(None, new_job.progress)
+ self.assertEqual('2020-03-05T22:05:32Z', format_datetime(new_job.created_at))
+ self.assertEqual(-1, new_job.finish_code)
def test_refresh_object(self):
self.server.version = '2.8'
@@ -344,7 +354,90 @@ def test_refresh_object(self):
with requests_mock.mock() as m:
m.post(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/refresh',
status_code=202, text=response_xml)
- self.server.datasources.refresh(datasource)
+ new_job = self.server.datasources.refresh(datasource)
+
+ # We only check the `id`; remaining fields are already tested in `test_refresh_id`
+ self.assertEqual('7c3d599e-949f-44c3-94a1-f30ba85757e4', new_job.id)
+
+ def test_update_hyper_data_datasource_object(self):
+ """Calling `update_hyper_data` with a `DatasourceItem` should update that datasource"""
+ self.server.version = "3.13"
+ self.baseurl = self.server.datasources.baseurl
+
+ datasource = TSC.DatasourceItem('')
+ datasource._id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb'
+ response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML)
+ with requests_mock.mock() as m:
+ m.patch(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/data',
+ status_code=202, headers={"requestid": "test_id"}, text=response_xml)
+ new_job = self.server.datasources.update_hyper_data(datasource, request_id="test_id", actions=[])
+
+ self.assertEqual('5c0ba560-c959-424e-b08a-f32ef0bfb737', new_job.id)
+ self.assertEqual('UpdateUploadedFile', new_job.type)
+ self.assertEqual(None, new_job.progress)
+ self.assertEqual('2021-09-18T09:40:12Z', format_datetime(new_job.created_at))
+ self.assertEqual(-1, new_job.finish_code)
+
+ def test_update_hyper_data_connection_object(self):
+ """Calling `update_hyper_data` with a `ConnectionItem` should update that connection"""
+ self.server.version = "3.13"
+ self.baseurl = self.server.datasources.baseurl
+
+ connection = TSC.ConnectionItem()
+ connection._datasource_id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb'
+ connection._id = '7ecaccd8-39b0-4875-a77d-094f6e930019'
+ response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML)
+ with requests_mock.mock() as m:
+ m.patch(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/connections/7ecaccd8-39b0-4875-a77d-094f6e930019/data',
+ status_code=202, headers={"requestid": "test_id"}, text=response_xml)
+ new_job = self.server.datasources.update_hyper_data(connection, request_id="test_id", actions=[])
+
+ # We only check the `id`; remaining fields are already tested in `test_update_hyper_data_datasource_object`
+ self.assertEqual('5c0ba560-c959-424e-b08a-f32ef0bfb737', new_job.id)
+
+ def test_update_hyper_data_datasource_string(self):
+ """For convenience, calling `update_hyper_data` with a `str` should update the datasource with the corresponding UUID"""
+ self.server.version = "3.13"
+ self.baseurl = self.server.datasources.baseurl
+
+ datasource_id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb'
+ response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML)
+ with requests_mock.mock() as m:
+ m.patch(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/data',
+ status_code=202, headers={"requestid": "test_id"}, text=response_xml)
+ new_job = self.server.datasources.update_hyper_data(datasource_id, request_id="test_id", actions=[])
+
+ # We only check the `id`; remaining fields are already tested in `test_update_hyper_data_datasource_object`
+ self.assertEqual('5c0ba560-c959-424e-b08a-f32ef0bfb737', new_job.id)
+
+ def test_update_hyper_data_datasource_payload_file(self):
+ """If `payload` is present, we upload it and associate the job with it"""
+ self.server.version = "3.13"
+ self.baseurl = self.server.datasources.baseurl
+
+ datasource_id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb'
+ mock_upload_id = '10051:c3e56879876842d4b3600f20c1f79876-0:0'
+ response_xml = read_xml_asset(UPDATE_HYPER_DATA_XML)
+ with requests_mock.mock() as rm, \
+ unittest.mock.patch.object(Fileuploads, "upload", return_value=mock_upload_id):
+ rm.patch(self.baseurl + '/9dbd2263-16b5-46e1-9c43-a76bb8ab65fb/data?uploadSessionId=' + mock_upload_id,
+ status_code=202, headers={"requestid": "test_id"}, text=response_xml)
+ new_job = self.server.datasources.update_hyper_data(datasource_id, request_id="test_id",
+ actions=[], payload=asset('World Indicators.hyper'))
+
+ # We only check the `id`; remaining fields are already tested in `test_update_hyper_data_datasource_object`
+ self.assertEqual('5c0ba560-c959-424e-b08a-f32ef0bfb737', new_job.id)
+
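+ # The test above pins down the payload flow: the file is first pushed through
+ # the Fileuploads.upload endpoint method (mocked here to return `mock_upload_id`)
+ # and the resulting upload session id is then referenced on the PATCH request via
+ # the `uploadSessionId` query parameter.
+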
+ def test_update_hyper_data_datasource_invalid_payload_file(self):
+ """If `payload` points to a nonexistent file, we report an error"""
+ self.server.version = "3.13"
+ self.baseurl = self.server.datasources.baseurl
+ datasource_id = '9dbd2263-16b5-46e1-9c43-a76bb8ab65fb'
+ with self.assertRaises(IOError) as cm:
+ self.server.datasources.update_hyper_data(datasource_id, request_id="test_id",
+ actions=[], payload='no/such/file.missing')
+ exception = cm.exception
+ self.assertEqual(str(exception), "File path does not lead to an existing file.")
def test_delete(self):
with requests_mock.mock() as m:
diff --git a/test/test_exponential_backoff.py b/test/test_exponential_backoff.py
new file mode 100644
index 000000000..57229d4ce
--- /dev/null
+++ b/test/test_exponential_backoff.py
@@ -0,0 +1,62 @@
+import unittest
+from ._utils import mocked_time
+from tableauserverclient.exponential_backoff import ExponentialBackoffTimer
+
+
+class ExponentialBackoffTests(unittest.TestCase):
+ def test_exponential(self):
+ with mocked_time() as mock_time:
+ exponentialBackoff = ExponentialBackoffTimer()
+ # Neither setting up the time mock nor constructing the timer should sleep
+ self.assertAlmostEqual(mock_time(), 0)
+ # The first sleep is rather short; the following sleeps become progressively longer
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 0.5)
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 1.2)
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 2.18)
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 3.552)
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 5.4728)
+
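+ # (The asserted cumulative times correspond to individual sleeps of
+ # 0.5, 0.7, 0.98, 1.372 and 1.9208s, i.e. a first sleep of 0.5s that
+ # grows by a factor of 1.4 on each call.)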
+
+ def test_exponential_saturation(self):
+ with mocked_time() as mock_time:
+ exponentialBackoff = ExponentialBackoffTimer()
+ for _ in range(99):
+ exponentialBackoff.sleep()
+ # We don't increase the sleep time above 30 seconds.
+ # Otherwise, the exponential sleep time could easily
+ # reach minutes or even hours between polls
+ for _ in range(5):
+ s = mock_time()
+ exponentialBackoff.sleep()
+ slept = mock_time() - s
+ self.assertAlmostEqual(slept, 30)
+
+
+ def test_timeout(self):
+ with mocked_time() as mock_time:
+ exponentialBackoff = ExponentialBackoffTimer(timeout=4.5)
+ for _ in range(4):
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 3.552)
+ # Without the timeout, this sleep would run until ~5.47 (cf. test_exponential);
+ # because of the timeout we wait less, so that the timeout is honored as
+ # closely as possible
+ exponentialBackoff.sleep()
+ self.assertAlmostEqual(mock_time(), 4.5)
+ # The next call to `sleep` will raise a TimeoutError
+ with self.assertRaises(TimeoutError):
+ exponentialBackoff.sleep()
+
+
+ def test_timeout_zero(self):
+ with mocked_time() as mock_time:
+ # Constructing the timer doesn't throw yet
+ exponentialBackoff = ExponentialBackoffTimer(timeout=0)
+ # But the first `sleep` immediately throws
+ with self.assertRaises(TimeoutError):
+ exponentialBackoff.sleep()
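+
+
+# Rough reference sketch of the timer behaviour these tests pin down (inferred
+# from the assertions above, illustrative only -- the real implementation lives
+# in tableauserverclient/exponential_backoff.py and may differ in detail;
+# assumes `import time`):
+#
+#   class ExponentialBackoffTimer:
+#       def __init__(self, timeout=None):
+#           self._current_sleep = 0.5                       # first sleep: 0.5s
+#           self._deadline = None if timeout is None else time.time() + timeout
+#
+#       def sleep(self):
+#           if self._deadline is not None and time.time() >= self._deadline:
+#               raise TimeoutError()                        # test_timeout / test_timeout_zero
+#           wait = self._current_sleep
+#           if self._deadline is not None:
+#               wait = min(wait, self._deadline - time.time())   # never overshoot the deadline
+#           time.sleep(wait)
+#           self._current_sleep = min(self._current_sleep * 1.4, 30)   # 1.4x growth, 30s cap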
diff --git a/test/test_fileuploads.py b/test/test_fileuploads.py
index 9d115636f..51662e4a2 100644
--- a/test/test_fileuploads.py
+++ b/test/test_fileuploads.py
@@ -4,7 +4,6 @@
from ._utils import asset
from tableauserverclient.server import Server
-from tableauserverclient.server.endpoint.fileuploads_endpoint import Fileuploads
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
FILEUPLOAD_INITIALIZE = os.path.join(TEST_ASSET_DIR, 'fileupload_initialize.xml')
@@ -22,23 +21,18 @@ def setUp(self):
self.baseurl = '{}/sites/{}/fileUploads'.format(self.server.baseurl, self.server.site_id)
def test_read_chunks_file_path(self):
- fileuploads = Fileuploads(self.server)
-
file_path = asset('SampleWB.twbx')
- chunks = fileuploads.read_chunks(file_path)
+ chunks = self.server.fileuploads._read_chunks(file_path)
for chunk in chunks:
self.assertIsNotNone(chunk)
def test_read_chunks_file_object(self):
- fileuploads = Fileuploads(self.server)
-
with open(asset('SampleWB.twbx'), 'rb') as f:
- chunks = fileuploads.read_chunks(f)
+ chunks = self.server.fileuploads._read_chunks(f)
for chunk in chunks:
self.assertIsNotNone(chunk)
def test_upload_chunks_file_path(self):
- fileuploads = Fileuploads(self.server)
file_path = asset('SampleWB.twbx')
upload_id = '7720:170fe6b1c1c7422dadff20f944d58a52-1:0'
@@ -49,12 +43,11 @@ def test_upload_chunks_file_path(self):
with requests_mock.mock() as m:
m.post(self.baseurl, text=initialize_response_xml)
m.put(self.baseurl + '/' + upload_id, text=append_response_xml)
- actual = fileuploads.upload_chunks(self.server, file_path)
+ actual = self.server.fileuploads.upload(file_path)
self.assertEqual(upload_id, actual)
def test_upload_chunks_file_object(self):
- fileuploads = Fileuploads(self.server)
upload_id = '7720:170fe6b1c1c7422dadff20f944d58a52-1:0'
with open(asset('SampleWB.twbx'), 'rb') as file_content:
@@ -65,6 +58,6 @@ def test_upload_chunks_file_object(self):
with requests_mock.mock() as m:
m.post(self.baseurl, text=initialize_response_xml)
m.put(self.baseurl + '/' + upload_id, text=append_response_xml)
- actual = fileuploads.upload_chunks(self.server, file_content)
+ actual = self.server.fileuploads.upload(file_content)
self.assertEqual(upload_id, actual)
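+
+ # Usage note (illustrative, not asserted here): the chunked-upload entry point is
+ # now reached through the server object instead of instantiating the endpoint:
+ #   upload_id = server.fileuploads.upload(path_or_file_object)
+ # The returned upload session id is what e.g. update_hyper_data references via
+ # its `uploadSessionId` query parameter (see test_datasources.py above).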
diff --git a/test/test_flow.py b/test/test_flow.py
index f5c057c30..545623d03 100644
--- a/test/test_flow.py
+++ b/test/test_flow.py
@@ -12,6 +12,7 @@
POPULATE_CONNECTIONS_XML = 'flow_populate_connections.xml'
POPULATE_PERMISSIONS_XML = 'flow_populate_permissions.xml'
UPDATE_XML = 'flow_update.xml'
+REFRESH_XML = 'flow_refresh.xml'
class FlowTests(unittest.TestCase):
@@ -113,3 +114,22 @@ def test_populate_permissions(self):
TSC.Permission.Capability.Write: TSC.Permission.Mode.Allow,
TSC.Permission.Capability.Read: TSC.Permission.Mode.Allow,
})
+
+ def test_refresh(self):
+ with open(asset(REFRESH_XML), 'rb') as f:
+ response_xml = f.read().decode('utf-8')
+ with requests_mock.mock() as m:
+ m.post(self.baseurl + '/92967d2d-c7e2-46d0-8847-4802df58f484/run', text=response_xml)
+ flow_item = TSC.FlowItem('test')
+ flow_item._id = '92967d2d-c7e2-46d0-8847-4802df58f484'
+ refresh_job = self.server.flows.refresh(flow_item)
+
+ self.assertEqual(refresh_job.id, 'd1b2ccd0-6dfa-444a-aee4-723dbd6b7c9d')
+ self.assertEqual(refresh_job.mode, 'Asynchronous')
+ self.assertEqual(refresh_job.type, 'RunFlow')
+ self.assertEqual(format_datetime(refresh_job.created_at), '2018-05-22T13:00:29Z')
+ self.assertIsInstance(refresh_job.flow_run, TSC.FlowRunItem)
+ self.assertEqual(refresh_job.flow_run.id, 'e0c3067f-2333-4eee-8028-e0a56ca496f6')
+ self.assertEqual(refresh_job.flow_run.flow_id, '92967d2d-c7e2-46d0-8847-4802df58f484')
+ self.assertEqual(format_datetime(refresh_job.flow_run.started_at), '2018-05-22T13:00:29Z')
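+
+ # Usage sketch (illustrative, not asserted here): the embedded flow_run can be
+ # polled with the helper exercised in test_flowruns.py, e.g.
+ #   flow_run = server.flow_runs.wait_for_job(refresh_job.flow_run.id, timeout=600)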
+
diff --git a/test/test_flowruns.py b/test/test_flowruns.py
new file mode 100644
index 000000000..d2e72f31a
--- /dev/null
+++ b/test/test_flowruns.py
@@ -0,0 +1,104 @@
+import unittest
+import os
+import requests_mock
+import xml.etree.ElementTree as ET
+import tableauserverclient as TSC
+from tableauserverclient.datetime_helpers import format_datetime
+from tableauserverclient.server.endpoint.exceptions import FlowRunFailedException
+from tableauserverclient.server.request_factory import RequestFactory
+from ._utils import read_xml_asset, mocked_time
+
+GET_XML = 'flow_runs_get.xml'
+GET_BY_ID_XML = 'flow_runs_get_by_id.xml'
+GET_BY_ID_FAILED_XML = 'flow_runs_get_by_id_failed.xml'
+GET_BY_ID_INPROGRESS_XML = 'flow_runs_get_by_id_inprogress.xml'
+
+
+class FlowRunTests(unittest.TestCase):
+ def setUp(self):
+ self.server = TSC.Server('http://test')
+
+ # Fake signin
+ self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
+ self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'
+ self.server.version = "3.10"
+
+ self.baseurl = self.server.flow_runs.baseurl
+
+ def test_get(self):
+ response_xml = read_xml_asset(GET_XML)
+ with requests_mock.mock() as m:
+ m.get(self.baseurl, text=response_xml)
+ all_flow_runs, pagination_item = self.server.flow_runs.get()
+
+ self.assertEqual(2, pagination_item.total_available)
+ self.assertEqual('cc2e652d-4a9b-4476-8c93-b238c45db968', all_flow_runs[0].id)
+ self.assertEqual('2021-02-11T01:42:55Z', format_datetime(all_flow_runs[0].started_at))
+ self.assertEqual('2021-02-11T01:57:38Z', format_datetime(all_flow_runs[0].completed_at))
+ self.assertEqual('Success', all_flow_runs[0].status)
+ self.assertEqual('100', all_flow_runs[0].progress)
+ self.assertEqual('aa23f4ac-906f-11e9-86fb-3f0f71412e77', all_flow_runs[0].background_job_id)
+
+ self.assertEqual('a3104526-c0c6-4ea5-8362-e03fc7cbd7ee', all_flow_runs[1].id)
+ self.assertEqual('2021-02-13T04:05:30Z', format_datetime(all_flow_runs[1].started_at))
+ self.assertEqual('2021-02-13T04:05:35Z', format_datetime(all_flow_runs[1].completed_at))
+ self.assertEqual('Failed', all_flow_runs[1].status)
+ self.assertEqual('100', all_flow_runs[1].progress)
+ self.assertEqual('1ad21a9d-2530-4fbf-9064-efd3c736e023', all_flow_runs[1].background_job_id)
+
+ def test_get_by_id(self):
+ response_xml = read_xml_asset(GET_BY_ID_XML)
+ with requests_mock.mock() as m:
+ m.get(self.baseurl + "/cc2e652d-4a9b-4476-8c93-b238c45db968", text=response_xml)
+ flow_run = self.server.flow_runs.get_by_id("cc2e652d-4a9b-4476-8c93-b238c45db968")
+
+ self.assertEqual('cc2e652d-4a9b-4476-8c93-b238c45db968', flow_run.id)
+ self.assertEqual('2021-02-11T01:42:55Z', format_datetime(flow_run.started_at))
+ self.assertEqual('2021-02-11T01:57:38Z', format_datetime(flow_run.completed_at))
+ self.assertEqual('Success', flow_run.status)
+ self.assertEqual('100', flow_run.progress)
+ self.assertEqual('1ad21a9d-2530-4fbf-9064-efd3c736e023', flow_run.background_job_id)
+
+ def test_cancel_id(self):
+ with requests_mock.mock() as m:
+ m.put(self.baseurl + '/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', status_code=204)
+ self.server.flow_runs.cancel('ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')
+
+ def test_cancel_item(self):
+ run = TSC.FlowRunItem()
+ run._id = 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760'
+ with requests_mock.mock() as m:
+ m.put(self.baseurl + '/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', status_code=204)
+ self.server.flow_runs.cancel(run)
+
+
+ def test_wait_for_job_finished(self):
+ # Waiting for an already finished flow run directly returns that run's info
+ response_xml = read_xml_asset(GET_BY_ID_XML)
+ flow_run_id = 'cc2e652d-4a9b-4476-8c93-b238c45db968'
+ with mocked_time(), requests_mock.mock() as m:
+ m.get('{0}/{1}'.format(self.baseurl, flow_run_id), text=response_xml)
+ flow_run = self.server.flow_runs.wait_for_job(flow_run_id)
+
+ self.assertEqual(flow_run_id, flow_run.id)
+ self.assertEqual(flow_run.progress, "100")
+
+
+ def test_wait_for_job_failed(self):
+ # Waiting for a failed flow run raises a FlowRunFailedException
+ response_xml = read_xml_asset(GET_BY_ID_FAILED_XML)
+ flow_run_id = 'c2b35d5a-e130-471a-aec8-7bc5435fe0e7'
+ with mocked_time(), requests_mock.mock() as m:
+ m.get('{0}/{1}'.format(self.baseurl, flow_run_id), text=response_xml)
+ with self.assertRaises(FlowRunFailedException):
+ self.server.flow_runs.wait_for_job(flow_run_id)
+
+
+ def test_wait_for_job_timeout(self):
+ # Waiting for a flow run which doesn't terminate raises a TimeoutError
+ response_xml = read_xml_asset(GET_BY_ID_INPROGRESS_XML)
+ flow_run_id = '71afc22c-9c06-40be-8d0f-4c4166d29e6c'
+ with mocked_time(), requests_mock.mock() as m:
+ m.get('{0}/{1}'.format(self.baseurl, flow_run_id), text=response_xml)
+ with self.assertRaises(TimeoutError):
+ self.server.flow_runs.wait_for_job(flow_run_id, timeout=30)
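+
+
+# Summary of the polling contract exercised above (illustrative):
+#   finished run -> wait_for_job returns the FlowRunItem (progress == "100")
+#   failed run   -> raises FlowRunFailedException
+#   no progress  -> raises TimeoutError once the given timeout expires
+#   e.g.  flow_run = server.flow_runs.wait_for_job(flow_run_id, timeout=600)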
diff --git a/test/test_job.py b/test/test_job.py
index 08b98b815..70bca996c 100644
--- a/test/test_job.py
+++ b/test/test_job.py
@@ -4,12 +4,16 @@
import requests_mock
import tableauserverclient as TSC
from tableauserverclient.datetime_helpers import utc
-from ._utils import read_xml_asset
+from tableauserverclient.server.endpoint.exceptions import JobFailedException
+from ._utils import read_xml_asset, mocked_time
TEST_ASSET_DIR = os.path.join(os.path.dirname(__file__), 'assets')
GET_XML = 'job_get.xml'
GET_BY_ID_XML = 'job_get_by_id.xml'
+GET_BY_ID_FAILED_XML = 'job_get_by_id_failed.xml'
+GET_BY_ID_CANCELLED_XML = 'job_get_by_id_cancelled.xml'
+GET_BY_ID_INPROGRESS_XML = 'job_get_by_id_inprogress.xml'
class JobTests(unittest.TestCase):
@@ -49,9 +53,6 @@ def test_get_by_id(self):
m.get('{0}/{1}'.format(self.baseurl, job_id), text=response_xml)
job = self.server.jobs.get_by_id(job_id)
- created_at = datetime(2020, 5, 13, 20, 23, 45, tzinfo=utc)
- updated_at = datetime(2020, 5, 13, 20, 25, 18, tzinfo=utc)
- ended_at = datetime(2020, 5, 13, 20, 25, 18, tzinfo=utc)
self.assertEqual(job_id, job.id)
self.assertListEqual(job.notes, ['Job detail notes'])
@@ -72,3 +73,35 @@ def test_cancel_item(self):
with requests_mock.mock() as m:
m.put(self.baseurl + '/ee8c6e70-43b6-11e6-af4f-f7b0d8e20760', status_code=204)
self.server.jobs.cancel(job)
+
+
+ def test_wait_for_job_finished(self):
+ # Waiting for an already finished job directly returns that job's info
+ response_xml = read_xml_asset(GET_BY_ID_XML)
+ job_id = '2eef4225-aa0c-41c4-8662-a76d89ed7336'
+ with mocked_time(), requests_mock.mock() as m:
+ m.get('{0}/{1}'.format(self.baseurl, job_id), text=response_xml)
+ job = self.server.jobs.wait_for_job(job_id)
+
+ self.assertEqual(job_id, job.id)
+ self.assertListEqual(job.notes, ['Job detail notes'])
+
+
+ def test_wait_for_job_failed(self):
+ # Waiting for a failed job raises a JobFailedException
+ response_xml = read_xml_asset(GET_BY_ID_FAILED_XML)
+ job_id = '77d5e57a-2517-479f-9a3c-a32025f2b64d'
+ with mocked_time(), requests_mock.mock() as m:
+ m.get('{0}/{1}'.format(self.baseurl, job_id), text=response_xml)
+ with self.assertRaises(JobFailedException):
+ self.server.jobs.wait_for_job(job_id)
+
+
+ def test_wait_for_job_timeout(self):
+ # Waiting for a job which doesn't terminate raises a TimeoutError
+ response_xml = read_xml_asset(GET_BY_ID_INPROGRESS_XML)
+ job_id = '77d5e57a-2517-479f-9a3c-a32025f2b64d'
+ with mocked_time(), requests_mock.mock() as m:
+ m.get('{0}/{1}'.format(self.baseurl, job_id), text=response_xml)
+ with self.assertRaises(TimeoutError):
+ self.server.jobs.wait_for_job(job_id, timeout=30)
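+
+
+ # Summary of the polling contract exercised above (illustrative):
+ #   finished job -> wait_for_job returns the JobItem
+ #   failed job   -> raises JobFailedException
+ #   no progress  -> raises TimeoutError once the given timeout expires
+ #   e.g.  job = server.jobs.wait_for_job(job_id, timeout=600)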
diff --git a/test/test_workbook.py b/test/test_workbook.py
index fc1344b9e..459b1f905 100644
--- a/test/test_workbook.py
+++ b/test/test_workbook.py
@@ -20,6 +20,7 @@
ADD_TAGS_XML = os.path.join(TEST_ASSET_DIR, 'workbook_add_tags.xml')
GET_BY_ID_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id.xml')
+GET_BY_ID_XML_PERSONAL = os.path.join(TEST_ASSET_DIR, 'workbook_get_by_id_personal.xml')
GET_EMPTY_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_empty.xml')
GET_INVALID_DATE_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get_invalid_date.xml')
GET_XML = os.path.join(TEST_ASSET_DIR, 'workbook_get.xml')
@@ -128,6 +129,31 @@ def test_get_by_id(self):
self.assertEqual('ENDANGERED SAFARI', single_workbook.views[0].name)
self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', single_workbook.views[0].content_url)
+ def test_get_by_id_personal(self):
+ # Workbooks in a personal space don't necessarily have a project_id or project_name
+ with open(GET_BY_ID_XML_PERSONAL, 'rb') as f:
+ response_xml = f.read().decode('utf-8')
+ with requests_mock.mock() as m:
+ m.get(self.baseurl + '/3cc6cd06-89ce-4fdc-b935-5294135d6d43', text=response_xml)
+ single_workbook = self.server.workbooks.get_by_id('3cc6cd06-89ce-4fdc-b935-5294135d6d43')
+
+ self.assertEqual('3cc6cd06-89ce-4fdc-b935-5294135d6d43', single_workbook.id)
+ self.assertEqual('SafariSample', single_workbook.name)
+ self.assertEqual('SafariSample', single_workbook.content_url)
+ self.assertEqual('http://tableauserver/#/workbooks/2/views', single_workbook.webpage_url)
+ self.assertEqual(False, single_workbook.show_tabs)
+ self.assertEqual(26, single_workbook.size)
+ self.assertEqual('2016-07-26T20:34:56Z', format_datetime(single_workbook.created_at))
+ self.assertEqual('description for SafariSample', single_workbook.description)
+ self.assertEqual('2016-07-26T20:35:05Z', format_datetime(single_workbook.updated_at))
+ self.assertTrue(single_workbook.project_id)
+ self.assertIsNone(single_workbook.project_name)
+ self.assertEqual('5de011f8-5aa9-4d5b-b991-f462c8dd6bb7', single_workbook.owner_id)
+ self.assertEqual(set(['Safari', 'Sample']), single_workbook.tags)
+ self.assertEqual('d79634e1-6063-4ec9-95ff-50acbf609ff5', single_workbook.views[0].id)
+ self.assertEqual('ENDANGERED SAFARI', single_workbook.views[0].name)
+ self.assertEqual('SafariSample/sheets/ENDANGEREDSAFARI', single_workbook.views[0].content_url)
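+
+ # Defensive-usage note (illustrative, not asserted beyond the checks above):
+ # client code should not rely on project_name being set for workbooks in a
+ # personal space, e.g.
+ #   project_label = single_workbook.project_name or "(personal space)"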
+
def test_get_by_id_missing_id(self):
self.assertRaises(ValueError, self.server.workbooks.get_by_id, '')
@@ -590,7 +616,7 @@ def test_publish_async(self):
self.assertEqual('PublishWorkbook', new_job.type)
self.assertEqual('0', new_job.progress)
self.assertEqual('2018-06-29T23:22:32Z', format_datetime(new_job.created_at))
- self.assertEqual('1', new_job.finish_code)
+ self.assertEqual(1, new_job.finish_code)
def test_publish_invalid_file(self):
new_workbook = TSC.WorkbookItem('test', 'ee8c6e70-43b6-11e6-af4f-f7b0d8e20760')