diff --git a/.gitignore b/.gitignore index 13d654b..b5a350f 100644 --- a/.gitignore +++ b/.gitignore @@ -13,4 +13,6 @@ _site/ *.swp .vscode/ ve/ +.venv +.coverage .noseids \ No newline at end of file diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000..b047bc7 --- /dev/null +++ b/.pylintrc @@ -0,0 +1,17 @@ +[MASTER] +ignore=docs,setup.py + +[MESSAGES CONTROL] + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" + +# disabled as long as we support python 2 +disable=useless-object-inheritance diff --git a/.travis.yml b/.travis.yml index bddfbc4..95a3d78 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,9 @@ language: python python: - - 2.7 - 3.6 + - 3.7 + - 3.8 + - 3.9 # Enable 3.7 without globally enabling sudo and dist: xenial for other build jobs matrix: include: @@ -10,12 +12,11 @@ matrix: sudo: true # command to install dependencies install: - # install numpy and pandas for full-suite testing - - pip install numpy - - pip install pandas + # install all dependencies for full-suite testing - pip install -r requirements.txt + - pip install -r dev-requirements.txt # install pycap - pip install -e . 
script: -- "nosetests -w test/ -v" +- "pytest" - "cd docs && make html" diff --git a/HISTORY.rst b/HISTORY.rst index 487ef41..d23d295 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -1,6 +1,23 @@ HISTORY ------- +1.1.2 (2020-11-05) ++++++++++++++++++++ + +* API Support + - Add support for ``exportFieldNames`` call (#125 @chgreer) + - Add ``dateRangeBegin`` and ``dateRangeEnd`` parameters to ``Project.export_records`` (#124 @chgreer) + +* Package Improvements + - Use ``pytest`` for full test suite (#131) + - Enforce ``black`` and ``pylint`` style and formatting on project (#131) + - Deprecate support for Python 2 (#131) + - Add ``pandas`` as an ``extra_requires`` (#131) + +* Documentation + - Update README with new community support model and how to contribute (#131) + + 1.1.1 (2020-08-18) +++++++++++++++++++ @@ -11,15 +28,15 @@ HISTORY +++++++++++++++++++ * API Support - - Add `rec_type` support in `import_records()` (#40 @dckc) - - Add `export_checkbox_labels` keyword arg to `export_records()` (#48 Tyler Rivera) + - Add ``rec_type`` support in ``import_records()`` (#40 @dckc) + - Add ``export_checkbox_labels`` keyword arg to ``export_records()`` (#48 Tyler Rivera) - Properly backfill requested fields for >6.X servers (#55) - Add Export Survey Participant List method (#71) - - Add `filter_logic` to export_records (#85 @erikh360) - - Add `forceAutoNumber` parameter to `import_records()` (#86 @CarlosBorroto) + - Add ``filter_logic`` to export_records (#85 @erikh360) + - Add ``forceAutoNumber`` parameter to ``import_records()`` (#86 @CarlosBorroto) - Add Export Project Information (#106 @martinburchell) - Add Generate Next Record Name (#107 @martinburchell) - - Add `repeat_instance` parameter to `imp_file` request (#104 @martinburchell) + - Add ``repeat_instance`` parameter to ``imp_file`` request (#104 @martinburchell) - Add Delete Record (#77 @damonms) - Add Export Reports (#91 @mcarmack) @@ -29,7 +46,7 @@ HISTORY - Add Python 3 support (#67, #92 @jmillxyz, 
@fonnesbeck) - Remove obsolete Project.filter() (#105 @martinburchell) - Change API parameters from comma-separated to arrays (#110 @martinburchell) - - Use single `requests.Session()` for connections (#120 @KarthikMasi) + - Use single ``requests.Session()`` for connections (#120 @KarthikMasi) * Bug Fixes - Allow later versions of semantic-version (#108 @martinburchell) diff --git a/README.rst b/README.rst index 91329a2..030756b 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -**I am no longer actively developing this code base. Please continue to submit bugs and I'll do my best to tackle them.** +**This project is community maintained. Please continue to submit bugs and feature requests, though it's the community's responsibility to address them.** .. image:: https://travis-ci.org/redcap-tools/PyCap.svg?branch=master :target: https://travis-ci.org/redcap-tools/PyCap @@ -98,15 +98,56 @@ Usage Installation ------------ + +Install with :code:`pip` +:: + + $ pip install PyCap + +Install extra requirements, which allows returning project data as a :code:`pandas.DataFrame` +:: + + $ pip install PyCap[pandas] + +Install from GitHub :: $ git clone git://github.com/sburns/PyCap.git PyCap $ cd PyCap $ python setup.py install - OR - $ pip install PyCap +Contributing +------------ + +1. Create a virtual environment and activate it +:: + + $ python -m venv .venv + $ source .venv/Scripts/activate + +2. Install `pip-tools <https://github.com/jazzband/pip-tools>`_. +:: + + $ pip install pip-tools + +3. Install all project dependencies +:: + + $ pip-sync requirements.txt dev-requirements.txt + +4. Install the package, with a link to the source code. This ensures any changes you +make are immediately available to test. +:: + + $ python setup.py develop + +5. Add your changes and make sure your changes pass all tests +:: + + $ pytest + +Finally, start a pull request! 
Citing ------ diff --git a/dev-requirements.in b/dev-requirements.in new file mode 100644 index 0000000..28546e3 --- /dev/null +++ b/dev-requirements.in @@ -0,0 +1,12 @@ +# Constrain dev requirements against production requirements +-c requirements.txt +pandas +pytest +pytest-cov +pytest-black +pytest-pylint +Sphinx +numpydoc +wheel +responses +twine diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 0000000..7f6869f --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1,75 @@ +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile '.\dev-requirements.in' +# +alabaster==0.7.12 # via sphinx +appdirs==1.4.4 # via black +astroid==2.4.2 # via pylint +atomicwrites==1.4.0 # via pytest +attrs==20.2.0 # via pytest +babel==2.8.0 # via sphinx +black==20.8b1 # via pytest-black +bleach==3.2.1 # via readme-renderer +certifi==2020.6.20 # via -c .\requirements.txt, requests +chardet==3.0.4 # via -c .\requirements.txt, requests +click==7.1.2 # via black +colorama==0.4.4 # via pylint, pytest, sphinx, twine +coverage==5.3 # via pytest-cov +docutils==0.16 # via readme-renderer, sphinx +idna==2.10 # via -c .\requirements.txt, requests +imagesize==1.2.0 # via sphinx +iniconfig==1.1.1 # via pytest +isort==5.6.4 # via pylint +jinja2==2.11.2 # via numpydoc, sphinx +keyring==21.4.0 # via twine +lazy-object-proxy==1.4.3 # via astroid +markupsafe==1.1.1 # via jinja2 +mccabe==0.6.1 # via pylint +mypy-extensions==0.4.3 # via black +numpy==1.19.4 # via pandas +numpydoc==1.1.0 # via -r .\dev-requirements.in +packaging==20.4 # via bleach, pytest, sphinx +pandas==1.1.4 # via -r .\dev-requirements.in +pathspec==0.8.0 # via black +pkginfo==1.6.1 # via twine +pluggy==0.13.1 # via pytest +py==1.9.0 # via pytest +pygments==2.7.2 # via readme-renderer, sphinx +pylint==2.6.0 # via pytest-pylint +pyparsing==2.4.7 # via packaging +pytest-black==0.3.12 # via -r .\dev-requirements.in +pytest-cov==2.10.1 # via -r .\dev-requirements.in 
+pytest-pylint==0.17.0 # via -r .\dev-requirements.in +pytest==6.1.2 # via -r .\dev-requirements.in, pytest-black, pytest-cov, pytest-pylint +python-dateutil==2.8.1 # via pandas +pytz==2020.4 # via babel, pandas +pywin32-ctypes==0.2.0 # via keyring +readme-renderer==28.0 # via twine +regex==2020.10.28 # via black +requests-toolbelt==0.9.1 # via twine +requests==2.24.0 # via -c .\requirements.txt, requests-toolbelt, responses, sphinx, twine +responses==0.12.0 # via -r .\dev-requirements.in +rfc3986==1.4.0 # via twine +six==1.15.0 # via astroid, bleach, packaging, python-dateutil, readme-renderer, responses +snowballstemmer==2.0.0 # via sphinx +sphinx==3.3.0 # via -r .\dev-requirements.in, numpydoc +sphinxcontrib-applehelp==1.0.2 # via sphinx +sphinxcontrib-devhelp==1.0.2 # via sphinx +sphinxcontrib-htmlhelp==1.0.3 # via sphinx +sphinxcontrib-jsmath==1.0.1 # via sphinx +sphinxcontrib-qthelp==1.0.3 # via sphinx +sphinxcontrib-serializinghtml==1.1.4 # via sphinx +toml==0.10.2 # via black, pylint, pytest, pytest-black, pytest-pylint +tqdm==4.51.0 # via twine +twine==3.2.0 # via -r .\dev-requirements.in +typed-ast==1.4.1 # via black +typing-extensions==3.7.4.3 # via black +urllib3==1.25.11 # via -c .\requirements.txt, requests, responses +webencodings==0.5.1 # via bleach +wheel==0.35.1 # via -r .\dev-requirements.in +wrapt==1.12.1 # via astroid + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/docs/_themes/flask_theme_support.py b/docs/_themes/flask_theme_support.py index 33f4744..0dcf53b 100644 --- a/docs/_themes/flask_theme_support.py +++ b/docs/_themes/flask_theme_support.py @@ -1,7 +1,19 @@ # flasky extensions. 
flasky pygments style based on tango style from pygments.style import Style -from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic, Whitespace, Punctuation, Other, Literal +from pygments.token import ( + Keyword, + Name, + Comment, + String, + Error, + Number, + Operator, + Generic, + Whitespace, + Punctuation, + Other, + Literal, +) class FlaskyStyle(Style): @@ -10,77 +22,68 @@ class FlaskyStyle(Style): styles = { # No corresponding class for the following: - #Text: "", # class: '' - Whitespace: "underline #f8f8f8", # class: 'w' - Error: "#a40000 border:#ef2929", # class: 'err' - Other: "#000000", # class 'x' - - Comment: "italic #8f5902", # class: 'c' - Comment.Preproc: "noitalic", # class: 'cp' - - Keyword: "bold #004461", # class: 'k' - Keyword.Constant: "bold #004461", # class: 'kc' - Keyword.Declaration: "bold #004461", # class: 'kd' - Keyword.Namespace: "bold #004461", # class: 'kn' - Keyword.Pseudo: "bold #004461", # class: 'kp' - Keyword.Reserved: "bold #004461", # class: 'kr' - Keyword.Type: "bold #004461", # class: 'kt' - - Operator: "#582800", # class: 'o' - Operator.Word: "bold #004461", # class: 'ow' - like keywords - - Punctuation: "bold #000000", # class: 'p' - + # Text: "", # class: '' + Whitespace: "underline #f8f8f8", # class: 'w' + Error: "#a40000 border:#ef2929", # class: 'err' + Other: "#000000", # class 'x' + Comment: "italic #8f5902", # class: 'c' + Comment.Preproc: "noitalic", # class: 'cp' + Keyword: "bold #004461", # class: 'k' + Keyword.Constant: "bold #004461", # class: 'kc' + Keyword.Declaration: "bold #004461", # class: 'kd' + Keyword.Namespace: "bold #004461", # class: 'kn' + Keyword.Pseudo: "bold #004461", # class: 'kp' + Keyword.Reserved: "bold #004461", # class: 'kr' + Keyword.Type: "bold #004461", # class: 'kt' + Operator: "#582800", # class: 'o' + Operator.Word: "bold #004461", # class: 'ow' - like keywords + Punctuation: "bold #000000", # class: 'p' # because special names such as 
Name.Class, Name.Function, etc. # are not recognized as such later in the parsing, we choose them # to look the same as ordinary variables. - Name: "#000000", # class: 'n' - Name.Attribute: "#c4a000", # class: 'na' - to be revised - Name.Builtin: "#004461", # class: 'nb' - Name.Builtin.Pseudo: "#3465a4", # class: 'bp' - Name.Class: "#000000", # class: 'nc' - to be revised - Name.Constant: "#000000", # class: 'no' - to be revised - Name.Decorator: "#888", # class: 'nd' - to be revised - Name.Entity: "#ce5c00", # class: 'ni' - Name.Exception: "bold #cc0000", # class: 'ne' - Name.Function: "#000000", # class: 'nf' - Name.Property: "#000000", # class: 'py' - Name.Label: "#f57900", # class: 'nl' - Name.Namespace: "#000000", # class: 'nn' - to be revised - Name.Other: "#000000", # class: 'nx' - Name.Tag: "bold #004461", # class: 'nt' - like a keyword - Name.Variable: "#000000", # class: 'nv' - to be revised - Name.Variable.Class: "#000000", # class: 'vc' - to be revised - Name.Variable.Global: "#000000", # class: 'vg' - to be revised - Name.Variable.Instance: "#000000", # class: 'vi' - to be revised - - Number: "#990000", # class: 'm' - - Literal: "#000000", # class: 'l' - Literal.Date: "#000000", # class: 'ld' - - String: "#4e9a06", # class: 's' - String.Backtick: "#4e9a06", # class: 'sb' - String.Char: "#4e9a06", # class: 'sc' - String.Doc: "italic #8f5902", # class: 'sd' - like a comment - String.Double: "#4e9a06", # class: 's2' - String.Escape: "#4e9a06", # class: 'se' - String.Heredoc: "#4e9a06", # class: 'sh' - String.Interpol: "#4e9a06", # class: 'si' - String.Other: "#4e9a06", # class: 'sx' - String.Regex: "#4e9a06", # class: 'sr' - String.Single: "#4e9a06", # class: 's1' - String.Symbol: "#4e9a06", # class: 'ss' - - Generic: "#000000", # class: 'g' - Generic.Deleted: "#a40000", # class: 'gd' - Generic.Emph: "italic #000000", # class: 'ge' - Generic.Error: "#ef2929", # class: 'gr' - Generic.Heading: "bold #000080", # class: 'gh' - Generic.Inserted: "#00A000", # 
class: 'gi' - Generic.Output: "#888", # class: 'go' - Generic.Prompt: "#745334", # class: 'gp' - Generic.Strong: "bold #000000", # class: 'gs' - Generic.Subheading: "bold #800080", # class: 'gu' - Generic.Traceback: "bold #a40000", # class: 'gt' + Name: "#000000", # class: 'n' + Name.Attribute: "#c4a000", # class: 'na' - to be revised + Name.Builtin: "#004461", # class: 'nb' + Name.Builtin.Pseudo: "#3465a4", # class: 'bp' + Name.Class: "#000000", # class: 'nc' - to be revised + Name.Constant: "#000000", # class: 'no' - to be revised + Name.Decorator: "#888", # class: 'nd' - to be revised + Name.Entity: "#ce5c00", # class: 'ni' + Name.Exception: "bold #cc0000", # class: 'ne' + Name.Function: "#000000", # class: 'nf' + Name.Property: "#000000", # class: 'py' + Name.Label: "#f57900", # class: 'nl' + Name.Namespace: "#000000", # class: 'nn' - to be revised + Name.Other: "#000000", # class: 'nx' + Name.Tag: "bold #004461", # class: 'nt' - like a keyword + Name.Variable: "#000000", # class: 'nv' - to be revised + Name.Variable.Class: "#000000", # class: 'vc' - to be revised + Name.Variable.Global: "#000000", # class: 'vg' - to be revised + Name.Variable.Instance: "#000000", # class: 'vi' - to be revised + Number: "#990000", # class: 'm' + Literal: "#000000", # class: 'l' + Literal.Date: "#000000", # class: 'ld' + String: "#4e9a06", # class: 's' + String.Backtick: "#4e9a06", # class: 'sb' + String.Char: "#4e9a06", # class: 'sc' + String.Doc: "italic #8f5902", # class: 'sd' - like a comment + String.Double: "#4e9a06", # class: 's2' + String.Escape: "#4e9a06", # class: 'se' + String.Heredoc: "#4e9a06", # class: 'sh' + String.Interpol: "#4e9a06", # class: 'si' + String.Other: "#4e9a06", # class: 'sx' + String.Regex: "#4e9a06", # class: 'sr' + String.Single: "#4e9a06", # class: 's1' + String.Symbol: "#4e9a06", # class: 'ss' + Generic: "#000000", # class: 'g' + Generic.Deleted: "#a40000", # class: 'gd' + Generic.Emph: "italic #000000", # class: 'ge' + Generic.Error: "#ef2929", 
# class: 'gr' + Generic.Heading: "bold #000080", # class: 'gh' + Generic.Inserted: "#00A000", # class: 'gi' + Generic.Output: "#888", # class: 'go' + Generic.Prompt: "#745334", # class: 'gp' + Generic.Strong: "bold #000000", # class: 'gs' + Generic.Subheading: "bold #800080", # class: 'gu' + Generic.Traceback: "bold #a40000", # class: 'gt' } diff --git a/docs/conf.py b/docs/conf.py index bff7f85..f6b3e8c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,41 +12,47 @@ # serve to show the default. import sys, os -sys.path.insert(0, os.path.abspath('..')) + +sys.path.insert(0, os.path.abspath("..")) import redcap # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -#sys.path.insert(0, os.path.abspath('.')) +# sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' +# needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', - 'sphinx.ext.autosummary', 'numpydoc'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.todo", + "sphinx.ext.viewcode", + "sphinx.ext.autosummary", + "numpydoc", +] numpydoc_show_class_members = False # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. 
-master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'PyCap' -copyright = u'2012, Scott Burns' +project = u"PyCap" +copyright = u"2012, Scott Burns" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -58,173 +64,167 @@ release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build'] +exclude_patterns = ["_build"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'flask_theme_support.FlaskyStyle' +pygments_style = "flask_theme_support.FlaskyStyle" # A list of ignored prefixes for module index sorting. 
-#modindex_common_prefix = [] +# modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = ['_themes'] +# html_theme_path = ['_themes'] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'default' +html_theme = "default" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +# html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
-#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'PyCapdoc' +htmlhelp_basename = "PyCapdoc" # -- Options for LaTeX output -------------------------------------------------- latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', + # The paper size ('letterpaper' or 'a4paper'). + #'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + #'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + #'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - ('index', 'PyCap.tex', u'PyCap Documentation', - u'Scott Burns', 'manual'), + ("index", "PyCap.tex", u"PyCap Documentation", u"Scott Burns", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # If true, show page references after internal links. -#latex_show_pagerefs = False +# latex_show_pagerefs = False # If true, show URL addresses after external links. -#latex_show_urls = False +# latex_show_urls = False # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. -#latex_domain_indices = True +# latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'pycap', u'PyCap Documentation', - [u'Scott Burns'], 1) -] +man_pages = [("index", "pycap", u"PyCap Documentation", [u"Scott Burns"], 1)] # If true, show URL addresses after external links. -#man_show_urls = False +# man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ @@ -233,19 +233,25 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'PyCap', u'PyCap Documentation', - u'Scott Burns', 'PyCap', 'One line description of project.', - 'Miscellaneous'), + ( + "index", + "PyCap", + u"PyCap Documentation", + u"Scott Burns", + "PyCap", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. 
-#texinfo_appendices = [] +# texinfo_appendices = [] # If false, no module index is generated. -#texinfo_domain_indices = True +# texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' -sys.path.append(os.path.abspath('_themes')) -html_theme_path = ['_themes'] -html_theme = 'default' +# texinfo_show_urls = 'footnote' +sys.path.append(os.path.abspath("_themes")) +html_theme_path = ["_themes"] +html_theme = "default" diff --git a/fabfile.py b/fabfile.py deleted file mode 100644 index d248b21..0000000 --- a/fabfile.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -__author__ = 'Scott Burns ' -__license__ = 'MIT' -__copyright__ = '2014, Vanderbilt University' - -from fabric.api import local, lcd - - -def upload(): - local('python setup.py register') - local('python setup.py sdist upload') - local('python setup.py bdist_wheel upload') - - -def rebuild(): - clean() - local("python setup.py develop -u") - local("python setup.py clean") - local("python setup.py install") - clean() - - -def clean(): - local("""find redcap -type f -name "*.pyc" -exec rm {} \;""") - local("rm -rf build") - local("rm -rf dist") - local("rm -rf PyCap.egg-info") - - -def test(): - local('nosetests -sv -w test/') - -def doc(): - with lcd('docs'): - local('make html') diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..a4c6462 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,8 @@ +[pytest] +addopts = -rsxX -l --tb=short --strict --pylint --black --cov=redcap +# Keep current format for future version of pytest +junit_family=xunit1 +# Ignore unimportant warnings +filterwarnings = + ignore::UserWarning + ignore::DeprecationWarning diff --git a/redcap/__init__.py b/redcap/__init__.py index 6ab7f23..9ba1b0f 100644 --- a/redcap/__init__.py +++ b/redcap/__init__.py @@ -1,11 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -__author__ = 'Scott Burns ' -__license__ = 'MIT' 
-__copyright__ = '2014, Vanderbilt University' -__version__ = '1.1.2' - """ This module exposes the REDCap API through the Project class. Instantiate the class with the URL to your REDCap system along with an API key, probably @@ -19,3 +14,8 @@ class with the URL to your REDCap system along with an API key, probably from .project import Project from .request import RCRequest, RCAPIError, RedcapError + +__author__ = "Scott Burns " +__license__ = "MIT" +__copyright__ = "2014, Vanderbilt University" +__version__ = "1.1.2" diff --git a/redcap/project.py b/redcap/project.py index 9f96db6..3cbf54c 100755 --- a/redcap/project.py +++ b/redcap/project.py @@ -1,25 +1,30 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - -__author__ = 'Scott Burns ' -__license__ = 'MIT' -__copyright__ = '2014, Vanderbilt University' +"""User facing class for interacting with a REDCap Project""" import json import warnings +import semantic_version from .request import RCRequest, RedcapError, RequestException -import semantic_version try: from StringIO import StringIO except ImportError: from io import StringIO +__author__ = "Scott Burns " +__license__ = "MIT" +__copyright__ = "2014, Vanderbilt University" + +# pylint: disable=too-many-instance-attributes +# pylint: disable=too-many-arguments +# pylint: disable=too-many-public-methods +# pylint: disable=redefined-builtin class Project(object): """Main class for interacting with REDCap projects""" - def __init__(self, url, token, name='', verify_ssl=True, lazy=False): + def __init__(self, url, token, name="", verify_ssl=True, lazy=False): """ Parameters ---------- @@ -53,34 +58,39 @@ def __init__(self, url, token, name='', verify_ssl=True, lazy=False): self.configure() def configure(self): + """Fill in project attributes""" try: self.metadata = self.__md() - except RequestException: - raise RedcapError("Exporting metadata failed. 
Check your URL and token.") + except RequestException as request_fail: + raise RedcapError( + "Exporting metadata failed. Check your URL and token." + ) from request_fail try: self.redcap_version = self.__rcv() - except: - raise RedcapError("Determination of REDCap version failed") - self.field_names = self.filter_metadata('field_name') + except Exception as general_fail: + raise RedcapError( + "Determination of REDCap version failed" + ) from general_fail + self.field_names = self.filter_metadata("field_name") # we'll use the first field as the default id for each row self.def_field = self.field_names[0] - self.field_labels = self.filter_metadata('field_label') - self.forms = tuple(set(c['form_name'] for c in self.metadata)) + self.field_labels = self.filter_metadata("field_label") + self.forms = tuple(set(c["form_name"] for c in self.metadata)) # determine whether longitudinal - ev_data = self._call_api(self.__basepl('event'), 'exp_event')[0] - arm_data = self._call_api(self.__basepl('arm'), 'exp_arm')[0] + ev_data = self._call_api(self.__basepl("event"), "exp_event")[0] + arm_data = self._call_api(self.__basepl("arm"), "exp_arm")[0] - if isinstance(ev_data, dict) and ('error' in ev_data.keys()): + if isinstance(ev_data, dict) and ("error" in ev_data.keys()): events = tuple([]) else: events = ev_data - if isinstance(arm_data, dict) and ('error' in arm_data.keys()): + if isinstance(arm_data, dict) and ("error" in arm_data.keys()): arm_nums = tuple([]) arm_names = tuple([]) else: - arm_nums = tuple([a['arm_num'] for a in arm_data]) - arm_names = tuple([a['name'] for a in arm_data]) + arm_nums = tuple([a["arm_num"] for a in arm_data]) + arm_names = tuple([a["name"] for a in arm_data]) self.events = events self.arm_nums = arm_nums self.arm_names = arm_names @@ -88,28 +98,28 @@ def configure(self): def __md(self): """Return the project's metadata structure""" - p_l = self.__basepl('metadata') - p_l['content'] = 'metadata' - return self._call_api(p_l, 'metadata')[0] + 
 p_l = self.__basepl("metadata") + p_l["content"] = "metadata" + return self._call_api(p_l, "metadata")[0] - def __basepl(self, content, rec_type='flat', format='json'): """Return a dictionary which can be used as is or added to for payloads""" - d = {'token': self.token, 'content': content, 'format': format} - if content not in ['metadata', 'file']: - d['type'] = rec_type - return d + payload_dict = {"token": self.token, "content": content, "format": format} + if content not in ["metadata", "file"]: + payload_dict["type"] = rec_type + return payload_dict def __rcv(self): - p_l = self.__basepl('version') - rcv = self._call_api(p_l, 'version')[0].decode('utf-8') - if 'error' in rcv: - warnings.warn('Version information not available for this REDCap instance') - return '' + payload = self.__basepl("version") + rcv = self._call_api(payload, "version")[0].decode("utf-8") + if "error" in rcv: + warnings.warn("Version information not available for this REDCap instance") + return "" if semantic_version.validate(rcv): return semantic_version.Version(rcv) - else: - return rcv + + return rcv def is_longitudinal(self): """ @@ -118,9 +128,9 @@ def is_longitudinal(self): boolean : longitudinal status of this project """ - return len(self.events) > 0 and \ - len(self.arm_nums) > 0 and \ - len(self.arm_names) > 0 + return ( + len(self.events) > 0 and len(self.arm_nums) > 0 and len(self.arm_names) > 0 + ) def filter_metadata(self, key): """ @@ -146,7 +156,7 @@ def _kwargs(self): """Private method to build a dict for sending to RCRequest Other default kwargs to the http library should go here""" - return {'verify': self.verify} + return {"verify": self.verify} def _call_api(self, payload, typpe, **kwargs): request_kwargs = self._kwargs() @@ -154,7 +164,7 @@ def _call_api(self, payload, typpe, **kwargs): rcr = RCRequest(self.url, payload, typpe) return rcr.execute(**request_kwargs) - def export_fem(self, 
arms=None, format='json', df_kwargs=None): + def export_fem(self, arms=None, format="json", df_kwargs=None): """ Export the project's form to event mapping @@ -175,24 +185,25 @@ def export_fem(self, arms=None, format='json', df_kwargs=None): form-event mapping for the project """ ret_format = format - if format == 'df': - ret_format = 'csv' - pl = self.__basepl('formEventMapping', format=ret_format) + if format == "df": + ret_format = "csv" + payload = self.__basepl("formEventMapping", format=ret_format) if arms: for i, value in enumerate(arms): - pl["arms[{}]".format(i)] = value + payload["arms[{}]".format(i)] = value - response, _ = self._call_api(pl, 'exp_fem') - if format in ('json', 'csv', 'xml'): + response, _ = self._call_api(payload, "exp_fem") + if format in ("json", "csv", "xml"): return response - elif format == 'df': - if not df_kwargs: - df_kwargs = {} + if format != "df": + raise ValueError(("Unsupported format: '{}'").format(format)) + if not df_kwargs: + df_kwargs = {} - return self.read_csv(StringIO(response), **df_kwargs) + return self.read_csv(StringIO(response), **df_kwargs) - def export_field_names(self, field=None, format='json', df_kwargs=None): + def export_field_names(self, field=None, format="json", df_kwargs=None): """ Export the project's export field names @@ -215,25 +226,25 @@ def export_field_names(self, field=None, format='json', df_kwargs=None): metadata structure for the project. 
""" ret_format = format - if format == 'df': - ret_format = 'csv' + if format == "df": + ret_format = "csv" - pl = self.__basepl('exportFieldNames', format=ret_format) + payload = self.__basepl("exportFieldNames", format=ret_format) if field: - pl['field'] = field + payload["field"] = field - response, _ = self._call_api(pl, 'exp_field_names') + response, _ = self._call_api(payload, "exp_field_names") - if format in ('json', 'csv', 'xml'): + if format in ("json", "csv", "xml"): return response - elif format == 'df': - if not df_kwargs: - df_kwargs = {'index_col': 'original_field_name'} - return self.read_csv(StringIO(response), **df_kwargs) + if format != "df": + raise ValueError(("Unsupported format: '{}'").format(format)) + if not df_kwargs: + df_kwargs = {"index_col": "original_field_name"} + return self.read_csv(StringIO(response), **df_kwargs) - def export_metadata(self, fields=None, forms=None, format='json', - df_kwargs=None): + def export_metadata(self, fields=None, forms=None, format="json", df_kwargs=None): """ Export the project's metadata @@ -257,23 +268,25 @@ def export_metadata(self, fields=None, forms=None, format='json', metadata sttructure for the project. 
""" ret_format = format - if format == 'df': - ret_format = 'csv' - pl = self.__basepl('metadata', format=ret_format) + if format == "df": + ret_format = "csv" + payload = self.__basepl("metadata", format=ret_format) to_add = [fields, forms] - str_add = ['fields', 'forms'] + str_add = ["fields", "forms"] for key, data in zip(str_add, to_add): if data: for i, value in enumerate(data): - pl["{}[{}]".format(key, i)] = value + payload["{}[{}]".format(key, i)] = value - response, _ = self._call_api(pl, 'metadata') - if format in ('json', 'csv', 'xml'): + response, _ = self._call_api(payload, "metadata") + if format in ("json", "csv", "xml"): return response - elif format == 'df': - if not df_kwargs: - df_kwargs = {'index_col': 'field_name'} - return self.read_csv(StringIO(response), **df_kwargs) + if format != "df": + raise ValueError(("Unsupported format: '{}'").format(format)) + + if not df_kwargs: + df_kwargs = {"index_col": "field_name"} + return self.read_csv(StringIO(response), **df_kwargs) def delete_records(self, records): """ @@ -289,26 +302,39 @@ def delete_records(self, records): response : int Number of records deleted """ - pl = dict() - pl['action'] = 'delete' - pl['content'] = 'record' - pl['token'] = self.token + payload = dict() + payload["action"] = "delete" + payload["content"] = "record" + payload["token"] = self.token # Turn list of records into dict, and append to payload records_dict = { "records[{}]".format(idx): record for idx, record in enumerate(records) } - pl.update(records_dict) + payload.update(records_dict) - pl['format'] = format - response, _ = self._call_api(pl, 'del_record') + payload["format"] = format + response, _ = self._call_api(payload, "del_record") return response - def export_records(self, records=None, fields=None, forms=None, - events=None, raw_or_label='raw', event_name='label', - format='json', export_survey_fields=False, - export_data_access_groups=False, df_kwargs=None, - export_checkbox_labels=False, filter_logic=None, 
- date_begin=None, date_end=None): + # pylint: disable=too-many-branches + # pylint: disable=too-many-locals + def export_records( + self, + records=None, + fields=None, + forms=None, + events=None, + raw_or_label="raw", + event_name="label", + format="json", + export_survey_fields=False, + export_data_access_groups=False, + df_kwargs=None, + export_checkbox_labels=False, + filter_logic=None, + date_begin=None, + date_end=None, + ): """ Export data from the REDCap project. @@ -371,77 +397,104 @@ def export_records(self, records=None, fields=None, forms=None, exported data """ ret_format = format - if format == 'df': - ret_format = 'csv' - pl = self.__basepl('record', format=ret_format) + if format == "df": + ret_format = "csv" + payload = self.__basepl("record", format=ret_format) fields = self.backfill_fields(fields, forms) - keys_to_add = (records, fields, forms, events, - raw_or_label, event_name, export_survey_fields, - export_data_access_groups, export_checkbox_labels) - - str_keys = ('records', 'fields', 'forms', 'events', 'rawOrLabel', - 'eventName', 'exportSurveyFields', 'exportDataAccessGroups', - 'exportCheckboxLabel') + keys_to_add = ( + records, + fields, + forms, + events, + raw_or_label, + event_name, + export_survey_fields, + export_data_access_groups, + export_checkbox_labels, + ) + + str_keys = ( + "records", + "fields", + "forms", + "events", + "rawOrLabel", + "eventName", + "exportSurveyFields", + "exportDataAccessGroups", + "exportCheckboxLabel", + ) for key, data in zip(str_keys, keys_to_add): if data: - if key in ('fields', 'records', 'forms', 'events'): + if key in ("fields", "records", "forms", "events"): for i, value in enumerate(data): - pl["{}[{}]".format(key, i)] = value + payload["{}[{}]".format(key, i)] = value else: - pl[key] = data + payload[key] = data if date_begin: - pl["dateRangeBegin"] = date_begin.strftime('%Y-%m-%d %H:%M:%S') + payload["dateRangeBegin"] = date_begin.strftime("%Y-%m-%d %H:%M:%S") if date_end: - 
pl["dateRangeEnd"] = date_end.strftime('%Y-%m-%d %H:%M:%S') + payload["dateRangeEnd"] = date_end.strftime("%Y-%m-%d %H:%M:%S") if filter_logic: - pl["filterLogic"] = filter_logic - response, _ = self._call_api(pl, 'exp_record') - if format in ('json', 'csv', 'xml'): + payload["filterLogic"] = filter_logic + response, _ = self._call_api(payload, "exp_record") + if format in ("json", "csv", "xml"): return response - elif format == 'df': - if not df_kwargs: - if self.is_longitudinal(): - df_kwargs = {'index_col': [self.def_field, - 'redcap_event_name']} - else: - df_kwargs = {'index_col': self.def_field} - buf = StringIO(response) - df = self.read_csv(buf, **df_kwargs) - buf.close() - return df + if format != "df": + raise ValueError(("Unsupported format: '{}'").format(format)) + + if not df_kwargs: + if self.is_longitudinal(): + df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]} + else: + df_kwargs = {"index_col": self.def_field} + buf = StringIO(response) + dataframe = self.read_csv(buf, **df_kwargs) + buf.close() + + return dataframe - def read_csv(self, buf, **df_kwargs): + # pylint: enable=too-many-branches + # pylint: enable=too-many-locals + + # pylint: disable=import-outside-toplevel + @staticmethod + def read_csv(buf, **df_kwargs): """Wrapper around pandas read_csv that handles EmptyDataError""" from pandas import DataFrame, read_csv from pandas.errors import EmptyDataError try: - df = read_csv(buf, **df_kwargs) + dataframe = read_csv(buf, **df_kwargs) except EmptyDataError: - df = DataFrame() + dataframe = DataFrame() + + return dataframe - return df + # pylint: enable=import-outside-toplevel def metadata_type(self, field_name): """If the given field_name is validated by REDCap, return it's type""" - return self.__meta_metadata(field_name, - 'text_validation_type_or_show_slider_number') + return self.__meta_metadata( + field_name, "text_validation_type_or_show_slider_number" + ) def __meta_metadata(self, field, key): """Return the value for key 
for the field in the metadata""" - mf = '' + metadata_field = "" try: - mf = str([f[key] for f in self.metadata - if f['field_name'] == field][0]) + metadata_field = str( + [f[key] for f in self.metadata if f["field_name"] == field][0] + ) except IndexError: print("%s not in metadata field:%s" % (key, field)) - return mf + return metadata_field else: - return mf + return metadata_field def backfill_fields(self, fields, forms): """ @@ -477,12 +530,19 @@ def names_labels(self, do_print=False): """Simple helper function to get all field names and labels """ if do_print: for name, label in zip(self.field_names, self.field_labels): - print('%s --> %s' % (str(name), str(label))) + print("%s --> %s" % (str(name), str(label))) return self.field_names, self.field_labels - def import_records(self, to_import, overwrite='normal', format='json', - return_format='json', return_content='count', - date_format='YMD', force_auto_number=False): + def import_records( + self, + to_import, + overwrite="normal", + format="json", + return_format="json", + return_content="count", + date_format="YMD", + force_auto_number=False, + ): """ Import data into the RedCap Project @@ -525,36 +585,37 @@ def import_records(self, to_import, overwrite='normal', format='json', response : dict, str response from REDCap API, json-decoded if ``return_format`` == ``'json'`` """ - pl = self.__basepl('record') - if hasattr(to_import, 'to_csv'): + payload = self.__basepl("record") + # pylint: disable=comparison-with-callable + if hasattr(to_import, "to_csv"): # We'll assume it's a df buf = StringIO() if self.is_longitudinal(): - csv_kwargs = {'index_label': [self.def_field, - 'redcap_event_name']} + csv_kwargs = {"index_label": [self.def_field, "redcap_event_name"]} else: - csv_kwargs = {'index_label': self.def_field} + csv_kwargs = {"index_label": self.def_field} to_import.to_csv(buf, **csv_kwargs) - pl['data'] = buf.getvalue() + payload["data"] = buf.getvalue() buf.close() - format = 'csv' - elif format == 
'json': - pl['data'] = json.dumps(to_import, separators=(',', ':')) + format = "csv" + elif format == "json": + payload["data"] = json.dumps(to_import, separators=(",", ":")) else: # don't do anything to csv/xml - pl['data'] = to_import - pl['overwriteBehavior'] = overwrite - pl['format'] = format - pl['returnFormat'] = return_format - pl['returnContent'] = return_content - pl['dateFormat'] = date_format - pl['forceAutoNumber'] = force_auto_number - response = self._call_api(pl, 'imp_record')[0] - if 'error' in response: + payload["data"] = to_import + # pylint: enable=comparison-with-callable + payload["overwriteBehavior"] = overwrite + payload["format"] = format + payload["returnFormat"] = return_format + payload["returnContent"] = return_content + payload["dateFormat"] = date_format + payload["forceAutoNumber"] = force_auto_number + response = self._call_api(payload, "imp_record")[0] + if "error" in response: raise RedcapError(str(response)) return response - def export_file(self, record, field, event=None, return_format='json'): + def export_file(self, record, field, event=None, return_format="json"): """ Export the contents of a file stored for a particular record @@ -582,28 +643,41 @@ def export_file(self, record, field, event=None, return_format='json'): """ self._check_file_field(field) # load up payload - pl = self.__basepl(content='file', format=return_format) + payload = self.__basepl(content="file", format=return_format) # there's no format field in this call - del pl['format'] - pl['returnFormat'] = return_format - pl['action'] = 'export' - pl['field'] = field - pl['record'] = record + del payload["format"] + payload["returnFormat"] = return_format + payload["action"] = "export" + payload["field"] = field + payload["record"] = record if event: - pl['event'] = event - content, headers = self._call_api(pl, 'exp_file') - #REDCap adds some useful things in content-type - if 'content-type' in headers: - splat = [kv.strip() for kv in 
headers['content-type'].split(';')] - kv = [(kv.split('=')[0], kv.split('=')[1].replace('"', '')) for kv - in splat if '=' in kv] - content_map = dict(kv) + payload["event"] = event + content, headers = self._call_api(payload, "exp_file") + # REDCap adds some useful things in content-type + if "content-type" in headers: + splat = [ + key_values.strip() for key_values in headers["content-type"].split(";") + ] + key_values = [ + (key_values.split("=")[0], key_values.split("=")[1].replace('"', "")) + for key_values in splat + if "=" in key_values + ] + content_map = dict(key_values) else: content_map = {} return content, content_map - def import_file(self, record, field, fname, fobj, event=None, - repeat_instance=None, return_format='json'): + def import_file( + self, + record, + field, + fname, + fobj, + event=None, + repeat_instance=None, + return_format="json", + ): """ Import the contents of a file represented by fobj to a particular records field @@ -634,21 +708,21 @@ def import_file(self, record, field, fname, fobj, event=None, """ self._check_file_field(field) # load up payload - pl = self.__basepl(content='file', format=return_format) + payload = self.__basepl(content="file", format=return_format) # no format in this call - del pl['format'] - pl['returnFormat'] = return_format - pl['action'] = 'import' - pl['field'] = field - pl['record'] = record + del payload["format"] + payload["returnFormat"] = return_format + payload["action"] = "import" + payload["field"] = field + payload["record"] = record if event: - pl['event'] = event + payload["event"] = event if repeat_instance: - pl['repeat_instance'] = repeat_instance - file_kwargs = {'files': {'file': (fname, fobj)}} - return self._call_api(pl, 'imp_file', **file_kwargs)[0] + payload["repeat_instance"] = repeat_instance + file_kwargs = {"files": {"file": (fname, fobj)}} + return self._call_api(payload, "imp_file", **file_kwargs)[0] - def delete_file(self, record, field, return_format='json', event=None): + def 
delete_file(self, record, field, return_format="json", event=None): """ Delete a file from REDCap @@ -674,27 +748,27 @@ def delete_file(self, record, field, return_format='json', event=None): """ self._check_file_field(field) # Load up payload - pl = self.__basepl(content='file', format=return_format) - del pl['format'] - pl['returnFormat'] = return_format - pl['action'] = 'delete' - pl['record'] = record - pl['field'] = field + payload = self.__basepl(content="file", format=return_format) + del payload["format"] + payload["returnFormat"] = return_format + payload["action"] = "delete" + payload["record"] = record + payload["field"] = field if event: - pl['event'] = event - return self._call_api(pl, 'del_file')[0] + payload["event"] = event + return self._call_api(payload, "del_file")[0] def _check_file_field(self, field): """Check that field exists and is a file field""" is_field = field in self.field_names - is_file = self.__meta_metadata(field, 'field_type') == 'file' + is_file = self.__meta_metadata(field, "field_type") == "file" if not (is_field and is_file): msg = "'%s' is not a field or not a 'file' field" % field raise ValueError(msg) - else: - return True - def export_users(self, format='json'): + return True + + def export_users(self, format="json"): """ Export the users of the Project @@ -727,10 +801,10 @@ def export_users(self, format='json'): list of users dicts when ``'format'='json'``, otherwise a string """ - pl = self.__basepl(content='user', format=format) - return self._call_api(pl, 'exp_user')[0] + payload = self.__basepl(content="user", format=format) + return self._call_api(payload, "exp_user")[0] - def export_survey_participant_list(self, instrument, event=None, format='json'): + def export_survey_participant_list(self, instrument, event=None, format="json"): """ Export the Survey Participant List @@ -747,18 +821,19 @@ def export_survey_participant_list(self, instrument, event=None, format='json'): format: (json, xml, csv), json by default 
Format of returned data """ - pl = self.__basepl(content='participantList', format=format) - pl['instrument'] = instrument + payload = self.__basepl(content="participantList", format=format) + payload["instrument"] = instrument if event: - pl['event'] = event - return self._call_api(pl, 'exp_survey_participant_list') + payload["event"] = event + return self._call_api(payload, "exp_survey_participant_list") def generate_next_record_name(self): - pl = self.__basepl(content='generateNextRecordName') + """Return the next record name for auto-numbering records""" + payload = self.__basepl(content="generateNextRecordName") - return self._call_api(pl, 'exp_next_id')[0] + return self._call_api(payload, "exp_next_id")[0] - def export_project_info(self, format='json'): + def export_project_info(self, format="json"): """ Export Project Information @@ -768,14 +843,21 @@ def export_project_info(self, format='json'): Format of returned data """ - pl = self.__basepl(content='project', format=format) + payload = self.__basepl(content="project", format=format) - return self._call_api(pl, 'exp_proj')[0] + return self._call_api(payload, "exp_proj")[0] - def export_reports(self, format='json', report_id=None, - raw_or_label='raw', raw_or_label_headers='raw', - export_checkbox_labels='false', decimal_character=None, - df_kwargs=None): + # pylint: disable=too-many-locals + def export_reports( + self, + format="json", + report_id=None, + raw_or_label="raw", + raw_or_label_headers="raw", + export_checkbox_labels="false", + decimal_character=None, + df_kwargs=None, + ): """ Export a report of the Project @@ -822,28 +904,48 @@ def export_reports(self, format='json', report_id=None, """ ret_format = format - if format == 'df': - from pandas import read_csv - ret_format = 'csv' - pl = self.__basepl(content='report', format=ret_format) - keys_to_add = (report_id, raw_or_label, raw_or_label_headers, export_checkbox_labels, - decimal_character) - str_keys = ('report_id', 'rawOrLabel', 
'rawOrLabelHeaders', 'exportCheckboxLabel', - 'decimalCharacter') + if format == "df": + ret_format = "csv" + + payload = self.__basepl(content="report", format=ret_format) + keys_to_add = ( + report_id, + raw_or_label, + raw_or_label_headers, + export_checkbox_labels, + decimal_character, + ) + str_keys = ( + "report_id", + "rawOrLabel", + "rawOrLabelHeaders", + "exportCheckboxLabel", + "decimalCharacter", + ) for key, data in zip(str_keys, keys_to_add): if data: - pl[key] = data - response, _ = self._call_api(pl, 'exp_report') - if format in ('json', 'csv', 'xml'): + payload[key] = data + response, _ = self._call_api(payload, "exp_report") + if format in ("json", "csv", "xml"): return response - elif format == 'df': - if not df_kwargs: - if self.is_longitudinal(): - df_kwargs = {'index_col': [self.def_field, - 'redcap_event_name']} - else: - df_kwargs = {'index_col': self.def_field} - buf = StringIO(response) - df = self.read_csv(buf, **df_kwargs) - buf.close() - return df + if format != "df": + raise ValueError(("Unsupported format: '{}'").format(format)) + + if not df_kwargs: + if self.is_longitudinal(): + df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]} + else: + df_kwargs = {"index_col": self.def_field} + buf = StringIO(response) + dataframe = self.read_csv(buf, **df_kwargs) + buf.close() + + return dataframe + + # pylint: enable=too-many-locals + + +# pylint: enable=too-many-instance-attributes +# pylint: enable=too-many-arguments +# pylint: enable=too-many-public-methods +# pylint: enable=redefined-builtin diff --git a/redcap/request.py b/redcap/request.py index f5e9714..3985791 100644 --- a/redcap/request.py +++ b/redcap/request.py @@ -1,30 +1,25 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - -__author__ = 'Scott Burns ' -__license__ = 'MIT' -__copyright__ = '2014, Vanderbilt University' - """ Low-level HTTP functionality """ -__author__ = 'Scott Burns' -__copyright__ = ' Copyright 2014, Vanderbilt University' - - -from requests import 
RequestException, Session import json +from requests import RequestException, Session + +__author__ = "Scott Burns " +__license__ = "MIT" +__copyright__ = "2014, Vanderbilt University" RedcapError = RequestException _session = Session() + class RCAPIError(Exception): """ Errors corresponding to a misuse of the REDCap API """ - pass class RCRequest(object): @@ -60,47 +55,90 @@ def __init__(self, url, payload, qtype, session=_session): if qtype: self.validate() - fmt_key = 'returnFormat' if 'returnFormat' in payload else 'format' + fmt_key = "returnFormat" if "returnFormat" in payload else "format" self.fmt = payload[fmt_key] def validate(self): """Checks that at least required params exist""" - required = ['token', 'content'] + required = ["token", "content"] valid_data = { - 'exp_record': (['type', 'format'], 'record', - 'Exporting record but content is not record'), - 'exp_field_names': (['format'], 'exportFieldNames', - 'Exporting field names, but content is not exportFieldNames'), - 'del_record': (['format'], 'record', - 'Deleting record but content is not record'), - 'imp_record': (['type', 'overwriteBehavior', 'data', 'format'], - 'record', 'Importing record but content is not record'), - 'metadata': (['format'], 'metadata', - 'Requesting metadata but content != metadata'), - 'exp_file': (['action', 'record', 'field'], 'file', - 'Exporting file but content is not file'), - 'imp_file': (['action', 'record', 'field'], 'file', - 'Importing file but content is not file'), - 'del_file': (['action', 'record', 'field'], 'file', - 'Deleteing file but content is not file'), - 'exp_event': (['format'], 'event', - 'Exporting events but content is not event'), - 'exp_arm': (['format'], 'arm', - 'Exporting arms but content is not arm'), - 'exp_fem': (['format'], 'formEventMapping', - 'Exporting form-event mappings but content != formEventMapping'), - 'exp_next_id': ([], 'generateNextRecordName', - 'Generating next record name but content is not generateNextRecordName'), - 
'exp_proj': (['format'], 'project', - 'Exporting project info but content is not project'), - 'exp_user': (['format'], 'user', - 'Exporting users but content is not user'), - 'exp_survey_participant_list': (['instrument'], 'participantList', - 'Exporting Survey Participant List but content != participantList'), - 'exp_report': (['report_id', 'format'], 'report', - 'Exporting Reports but content is not reports'), - 'version': (['format'], 'version', - 'Requesting version but content != version') + "exp_record": ( + ["type", "format"], + "record", + "Exporting record but content is not record", + ), + "exp_field_names": ( + ["format"], + "exportFieldNames", + "Exporting field names, but content is not exportFieldNames", + ), + "del_record": ( + ["format"], + "record", + "Deleting record but content is not record", + ), + "imp_record": ( + ["type", "overwriteBehavior", "data", "format"], + "record", + "Importing record but content is not record", + ), + "metadata": ( + ["format"], + "metadata", + "Requesting metadata but content != metadata", + ), + "exp_file": ( + ["action", "record", "field"], + "file", + "Exporting file but content is not file", + ), + "imp_file": ( + ["action", "record", "field"], + "file", + "Importing file but content is not file", + ), + "del_file": ( + ["action", "record", "field"], + "file", + "Deleteing file but content is not file", + ), + "exp_event": ( + ["format"], + "event", + "Exporting events but content is not event", + ), + "exp_arm": (["format"], "arm", "Exporting arms but content is not arm"), + "exp_fem": ( + ["format"], + "formEventMapping", + "Exporting form-event mappings but content != formEventMapping", + ), + "exp_next_id": ( + [], + "generateNextRecordName", + "Generating next record name but content is not generateNextRecordName", + ), + "exp_proj": ( + ["format"], + "project", + "Exporting project info but content is not project", + ), + "exp_user": (["format"], "user", "Exporting users but content is not user"), + 
"exp_survey_participant_list": ( + ["instrument"], + "participantList", + "Exporting Survey Participant List but content != participantList", + ), + "exp_report": ( + ["report_id", "format"], + "report", + "Exporting Reports but content is not reports", + ), + "version": ( + ["format"], + "version", + "Requesting version but content != version", + ), } extra, req_content, err_msg = valid_data[self.type] required.extend(extra) @@ -110,13 +148,13 @@ def validate(self): if not set(required) <= pl_keys: # what is not in pl_keys? not_pre = required - pl_keys - raise RCAPIError("Required keys: %s" % ', '.join(not_pre)) + raise RCAPIError("Required keys: %s" % ", ".join(not_pre)) # Check content, raise with err_msg if not good try: - if self.payload['content'] != req_content: + if self.payload["content"] != req_content: raise RCAPIError(err_msg) - except KeyError: - raise RCAPIError('content not in payload') + except KeyError as key_fail: + raise RCAPIError("content not in payload") from key_fail def execute(self, **kwargs): """Execute the API request and return data @@ -138,33 +176,37 @@ def execute(self, **kwargs): content = self.get_content(response) return content, response.headers + # pylint: disable=invalid-name def get_content(self, r): """Abstraction for grabbing content from a returned response""" - if self.type == 'exp_file': + if self.type == "exp_file": # don't use the decoded r.text return r.content - elif self.type == 'version': + if self.type == "version": return r.content - else: - if self.fmt == 'json': - content = {} - # Decode - try: - # Watch out for bad/empty json - content = json.loads(r.text, strict=False) - except ValueError as e: - if not self.expect_empty_json(): - # reraise for requests that shouldn't send empty json - raise ValueError(e) - finally: - return content - else: - return r.text + # pylint: disable=lost-exception + if self.fmt == "json": + content = {} + # Decode + try: + # Watch out for bad/empty json + content = json.loads(r.text, 
strict=False) + except ValueError as e: + if not self.expect_empty_json(): + # reraise for requests that shouldn't send empty json + raise ValueError(e) from e + finally: + return content + # pylint: enable=lost-exception + return r.text + + # pylint: enable=invalid-name def expect_empty_json(self): """Some responses are known to send empty responses""" - return self.type in ('imp_file', 'del_file') + return self.type in ("imp_file", "del_file") + # pylint: disable=invalid-name def raise_for_status(self, r): """Given a response, raise for bad status for certain actions @@ -174,12 +216,14 @@ def raise_for_status(self, r): Raising for everything wouldn't let the user see the (hopefully helpful) error message""" - if self.type in ('metadata', 'exp_file', 'imp_file', 'del_file'): + if self.type in ("metadata", "exp_file", "imp_file", "del_file"): r.raise_for_status() # see http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html # specifically 10.5 if 500 <= r.status_code < 600: raise RedcapError(r.content) - if 400 == r.status_code and self.type == 'exp_record': + if r.status_code == 400 and self.type == "exp_record": raise RedcapError(r.content) + + # pylint: enable=invalid-name diff --git a/requirements.txt b/requirements.txt index 84253b4..08338de 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,12 @@ -Sphinx==1.8.3 -nose==1.2.1 -numpydoc==0.8 -semantic-version==2.3.1 -requests>=1.1.0 -wheel==0.22.0 -responses==0.9.0 +# +# This file is autogenerated by pip-compile +# To update, run: +# +# pip-compile +# +certifi==2020.6.20 # via requests +chardet==3.0.4 # via requests +idna==2.10 # via requests +requests==2.24.0 # via PyCap (setup.py) +semantic-version==2.3.1 # via PyCap (setup.py) +urllib3==1.25.11 # via requests diff --git a/setup.py b/setup.py index 3736516..e8b4ef0 100644 --- a/setup.py +++ b/setup.py @@ -1,13 +1,21 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -__author__ = 'Scott Burns ' -__license__ = 'MIT' -__copyright__ = '2014, Vanderbilt 
University' +__author__ = "Scott Burns " +__license__ = "MIT" +__copyright__ = "2014, Vanderbilt University" import codecs import os import re +import sys +import warnings + +if sys.version_info[0] < 3: + warnings.warn( + "Support Python 2 has been deprecated, and will be removed " + " in a future major release. Please upgrade to Python 3." + ) # Taken from vulture setup.py: https://github.com/jendrikseipp/vulture/blob/master/setup.py def read(*parts): @@ -15,55 +23,60 @@ def read(*parts): with codecs.open(os.path.join(here, *parts), "r") as f: return f.read() + def find_version(*file_parts): version_file = read(*file_parts) - version_match = re.search( - r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M - ) + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M) if version_match: return version_match.group(1) raise RuntimeError("Unable to find version string.") + try: from setuptools import setup except ImportError: from distutils.core import setup -required = [ - 'requests>=1.0.0', - 'semantic-version>=2.3.1' -] +required = ["requests>=1.0.0", "semantic-version>=2.3.1"] -if __name__ == '__main__': - if os.path.exists('MANIFEST'): - os.remove('MANIFEST') +if os.path.exists("MANIFEST"): + os.remove("MANIFEST") - long_desc = open('README.rst').read() + '\n\n' + open('HISTORY.rst').read() +long_desc = open("README.rst").read() + "\n\n" + open("HISTORY.rst").read() - setup(name='PyCap', - maintainer='Scott Burns', - maintainer_email='scott.s.burns@gmail.com', - description="""PyCap: Python interface to REDCap""", - license='MIT', - url='http://sburns.github.com/PyCap', - version=find_version("redcap", "__init__.py"), - download_url='http://sburns.github.com/PyCap', - long_description=long_desc, - packages=['redcap'], - install_requires=required, - platforms='any', - classifiers=( - 'Development Status :: 5 - Production/Stable', - 'Intended Audience :: Developers', - 'Intended Audience :: Science/Research', - 'License :: OSI 
Approved :: MIT License', - 'License :: OSI Approved', - 'Topic :: Software Development', - 'Topic :: Scientific/Engineering', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Operating System :: Unix', - 'Operating System :: MacOS', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2.7',) - ) +setup( + name="PyCap", + maintainer="Scott Burns", + maintainer_email="scott.s.burns@gmail.com", + description="""PyCap: Python interface to REDCap""", + license="MIT", + url="http://sburns.github.com/PyCap", + version=find_version("redcap", "__init__.py"), + download_url="http://sburns.github.com/PyCap", + long_description=long_desc, + long_description_content_type="text/x-rst", + packages=["redcap"], + install_requires=required, + extras_require={ + "pandas": ["pandas>=0.25.0"], + # eg: + # 'rst': ['docutils>=0.11'], + # ':python_version=="2.6"': ['argparse'], + }, + platforms="any", + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "License :: OSI Approved", + "Topic :: Software Development", + "Topic :: Scientific/Engineering", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Operating System :: Unix", + "Operating System :: MacOS", + "Programming Language :: Python", + "Programming Language :: Python :: 2.7", + ], +) diff --git a/test/__init__.py b/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/test/test_project.py b/test/test_project.py index 94ddf8a..1d1f53f 100644 --- a/test/test_project.py +++ b/test/test_project.py @@ -1,49 +1,65 @@ #! 
/usr/bin/env python +# pylint: disable=missing-function-docstring +# pylint: disable=protected-access +"""Test suite for Project class""" +import os import unittest -import responses import json -from redcap import Project, RedcapError + +import responses import semantic_version -skip_pd = False +from redcap import Project, RedcapError + +SKIP_PD = False try: import pandas as pd except ImportError: - skip_pd = True + SKIP_PD = True try: import urlparse except ImportError: import urllib.parse as urlparse +# pylint: disable=pointless-statement try: basestring # attempt to evaluate basestring - def is_str(s): - return isinstance(s, basestring) - def is_bytestring(s): - return isinstance(s, basestring) + + def is_str(string): + return isinstance(string, basestring) + + def is_bytestring(string): + return isinstance(string, basestring) + + +# pylint: enable=pointless-statement + except NameError: - def is_str(s): - return isinstance(s, str) - def is_bytestring(s): - return isinstance(s, bytes) + + def is_str(string): + return isinstance(string, str) + + def is_bytestring(string): + return isinstance(string, bytes) + try: from unittest import mock except ImportError: import mock - +# pylint: disable=too-many-public-methods class ProjectTests(unittest.TestCase): """docstring for ProjectTests""" - long_proj_url = 'https://redcap.longproject.edu/api/' - normal_proj_url = 'https://redcap.normalproject.edu/api/' - ssl_proj_url = 'https://redcap.sslproject.edu/api/' - survey_proj_url = 'https://redcap.surveyproject.edu/api/' - bad_url = 'https://redcap.badproject.edu/api' - reg_token = 'supersecrettoken' + long_proj_url = "https://redcap.longproject.edu/api/" + normal_proj_url = "https://redcap.normalproject.edu/api/" + ssl_proj_url = "https://redcap.sslproject.edu/api/" + survey_proj_url = "https://redcap.surveyproject.edu/api/" + bad_url = "https://redcap.badproject.edu/api" + reg_token = "supersecrettoken" def setUp(self): self.create_projects() @@ -62,34 +78,31 @@ def 
request_callback_long(request): if "returnContent" in data: resp = {"count": 1} - elif (request_type == "metadata"): - resp = [{ - 'field_name': 'record_id', - 'field_label': 'Record ID', - 'form_name': 'Test Form', - "arm_num": 1, - "name": "test" - }] - elif (request_type == "version"): - resp = b'8.6.0' - headers = {'content-type': 'text/csv; charset=utf-8'} + elif request_type == "metadata": + resp = [ + { + "field_name": "record_id", + "field_label": "Record ID", + "form_name": "Test Form", + "arm_num": 1, + "name": "test", + } + ] + elif request_type == "version": + resp = b"8.6.0" + headers = {"content-type": "text/csv; charset=utf-8"} return (201, headers, resp) - elif (request_type == "event"): - resp = [{ - 'unique_event_name': "raw" - }] - elif (request_type == "arm"): - resp = [{ - "arm_num": 1, - "name": "test" - }] - elif (request_type in ["record", "formEventMapping"]): + elif request_type == "event": + resp = [{"unique_event_name": "raw"}] + elif request_type == "arm": + resp = [{"arm_num": 1, "name": "test"}] + elif request_type in ["record", "formEventMapping"]: if "csv" in data["format"]: resp = "record_id,test,redcap_event_name\n1,1,raw" - headers = {'content-type': 'text/csv; charset=utf-8'} + headers = {"content-type": "text/csv; charset=utf-8"} return (201, headers, resp) - else: - resp = [{"field_name":"record_id"}, {"field_name":"test"}] + + resp = [{"field_name": "record_id"}, {"field_name": "test"}] return (201, headers, json.dumps(resp)) @@ -100,6 +113,7 @@ def request_callback_long(request): content_type="application/json", ) + # pylint: disable=too-many-branches def add_normalproject_response(self): def request_callback_normal(request): parsed = urlparse.urlparse("?{}".format(request.body)) @@ -111,98 +125,101 @@ def request_callback_normal(request): if " filename" in data: resp = {} else: - request_type = data.get("content", ['unknown'])[0] + request_type = data.get("content", ["unknown"])[0] if "returnContent" in data: if 
"non_existent_key" in data["data"][0]: resp = {"error": "invalid field"} else: resp = {"count": 1} - elif (request_type == "metadata"): + elif request_type == "metadata": if "csv" in data["format"]: - resp = "field_name,field_label,form_name,arm_num,name\n"\ + resp = ( + "field_name,field_label,form_name,arm_num,name\n" "record_id,Record ID,Test Form,1,test\n" - headers = {'content-type': 'text/csv; charset=utf-8'} + ) + headers = {"content-type": "text/csv; charset=utf-8"} return (201, headers, resp) - else: - resp = [{ - 'field_name': 'record_id', - 'field_label': 'Record ID', - 'form_name': 'Test Form', + resp = [ + { + "field_name": "record_id", + "field_label": "Record ID", + "form_name": "Test Form", "arm_num": 1, "name": "test", "field_type": "text", - }, { - 'field_name': 'file', - 'field_label': 'File', - 'form_name': 'Test Form', + }, + { + "field_name": "file", + "field_label": "File", + "form_name": "Test Form", "arm_num": 1, "name": "file", "field_type": "file", - }, { - 'field_name': 'dob', - 'field_label': 'Date of Birth', - 'form_name': 'Test Form', + }, + { + "field_name": "dob", + "field_label": "Date of Birth", + "form_name": "Test Form", "arm_num": 1, "name": "dob", "field_type": "date", - }] - elif (request_type == "version"): - resp = { - 'error': "no version info" - } - elif (request_type == "event"): - resp = { - 'error': "no events" - } - elif (request_type == "arm"): - resp = { - 'error': "no arm" - } - elif (request_type == "record"): + }, + ] + elif request_type == "version": + resp = {"error": "no version info"} + elif request_type == "event": + resp = {"error": "no events"} + elif request_type == "arm": + resp = {"error": "no arm"} + elif request_type == "record": if "csv" in data["format"]: resp = "record_id,test,first_name,study_id\n1,1,Peter,1" - headers = {'content-type': 'text/csv; charset=utf-8'} + headers = {"content-type": "text/csv; charset=utf-8"} return (201, headers, resp) - elif "exportDataAccessGroups" in data: + + if 
"exportDataAccessGroups" in data: resp = [ - {"field_name":"record_id", "redcap_data_access_group": "group1"}, - {"field_name":"test", "redcap_data_access_group": "group1"} + { + "field_name": "record_id", + "redcap_data_access_group": "group1", + }, + { + "field_name": "test", + "redcap_data_access_group": "group1", + }, ] elif "label" in data.get("rawOrLabel"): resp = [{"matcheck1___1": "Foo"}] else: resp = [ {"record_id": "1", "test": "test1"}, - {"record_id": "2", "test": "test"} + {"record_id": "2", "test": "test"}, ] - elif (request_type == "file"): + elif request_type == "file": resp = {} headers["content-type"] = "text/plain;name=data.txt" - elif (request_type == "user"): + elif request_type == "user": resp = [ { - 'firstname': "test", - 'lastname': "test", - 'email': "test", - 'username': "test", - 'expiration': "test", - 'data_access_group': "test", - 'data_export': "test", - 'forms': "test" + "firstname": "test", + "lastname": "test", + "email": "test", + "username": "test", + "expiration": "test", + "data_access_group": "test", + "data_export": "test", + "forms": "test", } ] - elif (request_type == "generateNextRecordName"): + elif request_type == "generateNextRecordName": resp = 123 - elif (request_type == "project"): - resp = { - 'project_id': 123 - } + elif request_type == "project": + resp = {"project_id": 123} self.assertIsNotNone( - resp, - msg="No response for request_type '{}'".format(request_type) + resp, msg="No response for request_type '{}'".format(request_type) ) return (201, headers, json.dumps(resp)) @@ -214,32 +231,30 @@ def request_callback_normal(request): content_type="application/json", ) + # pylint: enable=too-many-branches + def add_ssl_project(self): def request_callback_ssl(request): parsed = urlparse.urlparse("?{}".format(request.body)) data = urlparse.parse_qs(parsed.query) request_type = data["content"][0] - if (request_type == "metadata"): - resp = [{ - 'field_name': 'record_id', - 'field_label': 'Record ID', - 'form_name': 
'Test Form', - "arm_num": 1, - "name": "test" - }] - if (request_type == "version"): - resp = { - 'error': "no version info" - } - if (request_type == "event"): - resp = { - 'error': "no events" - } - if (request_type == "arm"): - resp = { - 'error': "no arm" - } + if request_type == "metadata": + resp = [ + { + "field_name": "record_id", + "field_label": "Record ID", + "form_name": "Test Form", + "arm_num": 1, + "name": "test", + } + ] + if request_type == "version": + resp = {"error": "no version info"} + if request_type == "event": + resp = {"error": "no events"} + if request_type == "arm": + resp = {"error": "no arm"} headers = {"Content-Type": "application/json"} return (201, headers, json.dumps(resp)) @@ -257,30 +272,34 @@ def request_callback_survey(request): data = urlparse.parse_qs(parsed.query) request_type = data["content"][0] - if (request_type == "metadata"): - resp = [{ - 'field_name': 'record_id', - 'field_label': 'Record ID', - 'form_name': 'Test Form', - "arm_num": 1, - "name": "test" - }] - elif (request_type == "version"): - resp = { - 'error': "no version info" - } - elif (request_type == "event"): - resp = { - 'error': "no events" - } - elif (request_type == "arm"): - resp = { - 'error': "no arm" - } - elif (request_type == "record"): + if request_type == "metadata": resp = [ - {"field_name":"record_id", "redcap_survey_identifier": "test", "demographics_timestamp": "a_real_date"}, - {"field_name":"test", "redcap_survey_identifier": "test", "demographics_timestamp": "a_real_date"} + { + "field_name": "record_id", + "field_label": "Record ID", + "form_name": "Test Form", + "arm_num": 1, + "name": "test", + } + ] + elif request_type == "version": + resp = {"error": "no version info"} + elif request_type == "event": + resp = {"error": "no events"} + elif request_type == "arm": + resp = {"error": "no arm"} + elif request_type == "record": + resp = [ + { + "field_name": "record_id", + "redcap_survey_identifier": "test", + "demographics_timestamp": 
"a_real_date", + }, + { + "field_name": "test", + "redcap_survey_identifier": "test", + "demographics_timestamp": "a_real_date", + }, ] headers = {"Content-Type": "application/json"} @@ -305,7 +324,6 @@ def create_projects(self): self.ssl_proj = Project(self.ssl_proj_url, self.reg_token, verify_ssl=False) self.survey_proj = Project(self.survey_proj_url, self.reg_token) - def test_good_init(self): """Ensure basic instantiation """ @@ -316,8 +334,16 @@ def test_good_init(self): def test_normal_attrs(self): """Ensure projects are created with all normal attrs""" - for attr in ('metadata', 'field_names', 'field_labels', 'forms', - 'events', 'arm_names', 'arm_nums', 'def_field'): + for attr in ( + "metadata", + "field_names", + "field_labels", + "forms", + "events", + "arm_names", + "arm_nums", + "def_field", + ): self.assertTrue(hasattr(self.reg_proj, attr)) def test_long_attrs(self): @@ -334,7 +360,7 @@ def test_is_longitudinal(self): def test_regular_attrs(self): """proj.events/arm_names/arm_nums should be empty tuples""" - for attr in 'events', 'arm_names', 'arm_nums': + for attr in "events", "arm_names", "arm_nums": attr_obj = getattr(self.reg_proj, attr) self.assertIsNotNone(attr_obj) self.assertEqual(len(attr_obj), 0) @@ -353,7 +379,7 @@ def test_long_export(self): """After determining a unique event name, make sure we get a list of dicts""" self.add_long_project_response() - unique_event = self.long_proj.events[0]['unique_event_name'] + unique_event = self.long_proj.events[0]["unique_event_name"] data = self.long_proj.export_records(events=[unique_event]) self.assertIsInstance(data, list) for record in data: @@ -365,21 +391,22 @@ def test_import_records(self): self.add_normalproject_response() data = self.reg_proj.export_records() response = self.reg_proj.import_records(data) - self.assertIn('count', response) - self.assertNotIn('error', response) + self.assertIn("count", response) + self.assertNotIn("error", response) @responses.activate def 
test_import_exception(self): "Test record import throws RedcapError for bad import" self.add_normalproject_response() data = self.reg_proj.export_records() - data[0]['non_existent_key'] = 'foo' - with self.assertRaises(RedcapError) as cm: + data[0]["non_existent_key"] = "foo" + with self.assertRaises(RedcapError) as assert_context: self.reg_proj.import_records(data) - exc = cm.exception - self.assertIn('error', exc.args[0]) + exc = assert_context.exception + self.assertIn("error", exc.args[0]) - def is_good_csv(self, csv_string): + @staticmethod + def is_good_csv(csv_string): "Helper to test csv strings" return is_str(csv_string) @@ -387,45 +414,45 @@ def is_good_csv(self, csv_string): def test_csv_export(self): """Test valid csv export """ self.add_normalproject_response() - csv = self.reg_proj.export_records(format='csv') + csv = self.reg_proj.export_records(format="csv") self.assertTrue(self.is_good_csv(csv)) @responses.activate def test_metadata_export(self): """Test valid metadata csv export""" self.add_normalproject_response() - csv = self.reg_proj.export_metadata(format='csv') + csv = self.reg_proj.export_metadata(format="csv") self.assertTrue(self.is_good_csv(csv)) def test_metadata_export_passes_filters_as_arrays(self): self.reg_proj._call_api = mock.Mock() self.reg_proj._call_api.return_value = (None, None) self.reg_proj.export_metadata( - fields=['field0', 'field1', 'field2'], - forms=['form0', 'form1', 'form2'], + fields=["field0", "field1", "field2"], + forms=["form0", "form1", "form2"], ) args, _ = self.reg_proj._call_api.call_args payload = args[0] - self.assertEqual(payload['fields[0]'], 'field0') - self.assertEqual(payload['fields[1]'], 'field1') - self.assertEqual(payload['fields[2]'], 'field2') - self.assertEqual(payload['forms[2]'], 'form2') + self.assertEqual(payload["fields[0]"], "field0") + self.assertEqual(payload["fields[1]"], "field1") + self.assertEqual(payload["fields[2]"], "field2") + self.assertEqual(payload["forms[2]"], "form2") def 
test_bad_creds(self): "Test that exceptions are raised with bad URL or tokens" with self.assertRaises(RedcapError): Project(self.bad_url, self.reg_token) with self.assertRaises(RedcapError): - Project(self.bad_url, '1') + Project(self.bad_url, "1") @responses.activate def test_fem_export(self): """ Test fem export in json format gives list of dicts""" self.add_long_project_response() - fem = self.long_proj.export_fem(format='json') + fem = self.long_proj.export_fem(format="json") self.assertIsInstance(fem, list) for arm in fem: self.assertIsInstance(arm, dict) @@ -434,44 +461,44 @@ def test_fem_export_passes_filters_as_arrays(self): self.reg_proj._call_api = mock.Mock() self.reg_proj._call_api.return_value = (None, None) self.reg_proj.export_fem( - arms=['arm0', 'arm1', 'arm2'], + arms=["arm0", "arm1", "arm2"], ) args, _ = self.reg_proj._call_api.call_args payload = args[0] - self.assertEqual(payload['arms[0]'], 'arm0') - self.assertEqual(payload['arms[1]'], 'arm1') - self.assertEqual(payload['arms[2]'], 'arm2') + self.assertEqual(payload["arms[0]"], "arm0") + self.assertEqual(payload["arms[1]"], "arm1") + self.assertEqual(payload["arms[2]"], "arm2") @responses.activate def test_file_export(self): """Test file export and proper content-type parsing""" self.add_normalproject_response() - record, field = '1', 'file' - #Upload first to make sure file is there + record, field = "1", "file" + # Upload first to make sure file is there self.import_file() # Now export it content, headers = self.reg_proj.export_file(record, field) self.assertTrue(is_bytestring(content)) # We should at least get the filename in the headers - for key in ['name']: + for key in ["name"]: self.assertIn(key, headers) # needs to raise ValueError for exporting non-file fields with self.assertRaises(ValueError): - self.reg_proj.export_file(record=record, field='dob') + self.reg_proj.export_file(record=record, field="dob") def import_file(self): upload_fname = self.upload_fname() - with 
open(upload_fname, 'r') as fobj: - response = self.reg_proj.import_file('1', 'file', upload_fname, fobj) + with open(upload_fname, "r") as fobj: + response = self.reg_proj.import_file("1", "file", upload_fname, fobj) return response - def upload_fname(self): - import os - this_dir, this_fname = os.path.split(__file__) - return os.path.join(this_dir, 'data.txt') + @staticmethod + def upload_fname(): + this_dir, _ = os.path.split(__file__) + return os.path.join(this_dir, "data.txt") @responses.activate def test_file_import(self): @@ -482,13 +509,12 @@ def test_file_import(self): response = self.import_file() except RedcapError: self.fail("Shouldn't throw RedcapError for successful imports") - self.assertTrue('error' not in response) + self.assertTrue("error" not in response) # Test importing a file to a non-file field raises a ValueError fname = self.upload_fname() - with open(fname, 'r') as fobj: + with open(fname, "r") as fobj: with self.assertRaises(ValueError): - response = self.reg_proj.import_file('1', 'first_name', - fname, fobj) + response = self.reg_proj.import_file("1", "first_name", fname, fobj) @responses.activate def test_file_delete(self): @@ -496,7 +522,7 @@ def test_file_delete(self): self.add_normalproject_response() # make sure deleting doesn't raise try: - self.reg_proj.delete_file('1', 'file') + self.reg_proj.delete_file("1", "file") except RedcapError: self.fail("Shouldn't throw RedcapError for successful deletes") @@ -507,9 +533,16 @@ def test_user_export(self): users = self.reg_proj.export_users() # A project must have at least one user self.assertTrue(len(users) > 0) - req_keys = ['firstname', 'lastname', 'email', 'username', - 'expiration', 'data_access_group', 'data_export', - 'forms'] + req_keys = [ + "firstname", + "lastname", + "email", + "username", + "expiration", + "data_access_group", + "data_export", + "forms", + ] for user in users: for key in req_keys: self.assertIn(key, user) @@ -518,12 +551,12 @@ def test_verify_ssl(self): 
"""Test argument making for SSL verification""" # Test we won't verify SSL cert for non-verified project post_kwargs = self.ssl_proj._kwargs() - self.assertIn('verify', post_kwargs) - self.assertFalse(post_kwargs['verify']) + self.assertIn("verify", post_kwargs) + self.assertFalse(post_kwargs["verify"]) # Test we do verify SSL cert in normal project post_kwargs = self.reg_proj._kwargs() - self.assertIn('verify', post_kwargs) - self.assertTrue(post_kwargs['verify']) + self.assertIn("verify", post_kwargs) + self.assertTrue(post_kwargs["verify"]) @responses.activate def test_export_data_access_groups(self): @@ -531,11 +564,11 @@ def test_export_data_access_groups(self): self.add_normalproject_response() records = self.reg_proj.export_records(export_data_access_groups=True) for record in records: - self.assertIn('redcap_data_access_group', record) + self.assertIn("redcap_data_access_group", record) # When not passed, that key shouldn't be there records = self.reg_proj.export_records() for record in records: - self.assertNotIn('redcap_data_access_group', record) + self.assertNotIn("redcap_data_access_group", record) @responses.activate def test_export_survey_fields(self): @@ -549,89 +582,91 @@ def test_export_survey_fields(self): self.add_normalproject_response() records = self.survey_proj.export_records(export_survey_fields=True) for record in records: - self.assertIn('redcap_survey_identifier', record) - self.assertIn('demographics_timestamp', record) + self.assertIn("redcap_survey_identifier", record) + self.assertIn("demographics_timestamp", record) # The regular project doesn't have a survey setup. Users should # be able this argument as True but it winds up a no-op. 
records = self.reg_proj.export_records(export_survey_fields=True) for record in records: - self.assertNotIn('redcap_survey_identifier', record) - self.assertNotIn('demographics_timestamp', record) + self.assertNotIn("redcap_survey_identifier", record) + self.assertNotIn("demographics_timestamp", record) - @unittest.skipIf(skip_pd, "Couldn't import pandas") + @unittest.skipIf(SKIP_PD, "Couldn't import pandas") @responses.activate def test_metadata_to_df(self): """Test metadata export --> DataFrame""" self.add_normalproject_response() - df = self.reg_proj.export_metadata(format='df') - self.assertIsInstance(df, pd.DataFrame) + dataframe = self.reg_proj.export_metadata(format="df") + self.assertIsInstance(dataframe, pd.DataFrame) - @unittest.skipIf(skip_pd, "Couldn't import pandas") + @unittest.skipIf(SKIP_PD, "Couldn't import pandas") @responses.activate def test_export_to_df(self): """Test export --> DataFrame""" self.add_normalproject_response() self.add_long_project_response() - df = self.reg_proj.export_records(format='df') - self.assertIsInstance(df, pd.DataFrame) + dataframe = self.reg_proj.export_records(format="df") + self.assertIsInstance(dataframe, pd.DataFrame) # Test it's a normal index - self.assertTrue(hasattr(df.index, 'name')) + self.assertTrue(hasattr(dataframe.index, "name")) # Test for a MultiIndex on longitudinal df - long_df = self.long_proj.export_records(format='df', event_name='raw') - self.assertTrue(hasattr(long_df.index, 'names')) + long_dataframe = self.long_proj.export_records(format="df", event_name="raw") + self.assertTrue(hasattr(long_dataframe.index, "names")) - @unittest.skipIf(skip_pd, "Couldn't import pandas") + @unittest.skipIf(SKIP_PD, "Couldn't import pandas") @responses.activate def test_export_df_kwargs(self): """Test passing kwargs to export DataFrame construction""" self.add_normalproject_response() - df = self.reg_proj.export_records(format='df', - df_kwargs={'index_col': 'first_name'}) - self.assertEqual(df.index.name, 
'first_name') - self.assertTrue('study_id' in df) + dataframe = self.reg_proj.export_records( + format="df", df_kwargs={"index_col": "first_name"} + ) + self.assertEqual(dataframe.index.name, "first_name") + self.assertTrue("study_id" in dataframe) - @unittest.skipIf(skip_pd, "Couldn't import pandas") + @unittest.skipIf(SKIP_PD, "Couldn't import pandas") @responses.activate def test_metadata_df_kwargs(self): """Test passing kwargs to metadata DataFrame construction""" self.add_normalproject_response() - df = self.reg_proj.export_metadata(format='df', - df_kwargs={'index_col': 'field_label'}) - self.assertEqual(df.index.name, 'field_label') - self.assertTrue('field_name' in df) + dataframe = self.reg_proj.export_metadata( + format="df", df_kwargs={"index_col": "field_label"} + ) + self.assertEqual(dataframe.index.name, "field_label") + self.assertTrue("field_name" in dataframe) - @unittest.skipIf(skip_pd, "Couldn't import pandas") + @unittest.skipIf(SKIP_PD, "Couldn't import pandas") @responses.activate def test_import_dataframe(self): """Test importing a pandas.DataFrame""" self.add_normalproject_response() self.add_long_project_response() - df = self.reg_proj.export_records(format='df') - response = self.reg_proj.import_records(df) - self.assertIn('count', response) - self.assertNotIn('error', response) - long_df = self.long_proj.export_records(event_name='raw', format='df') - response = self.long_proj.import_records(long_df) - self.assertIn('count', response) - self.assertNotIn('error', response) + dataframe = self.reg_proj.export_records(format="df") + response = self.reg_proj.import_records(dataframe) + self.assertIn("count", response) + self.assertNotIn("error", response) + long_dataframe = self.long_proj.export_records(event_name="raw", format="df") + response = self.long_proj.import_records(long_dataframe) + self.assertIn("count", response) + self.assertNotIn("error", response) def test_export_records_handles_empty_data_error(self): self.reg_proj._call_api = 
mock.Mock() self.reg_proj._call_api.return_value = "\n", {} - df = self.reg_proj.export_records(format='df') - self.assertTrue(df.empty) + dataframe = self.reg_proj.export_records(format="df") + self.assertTrue(dataframe.empty) def test_export_fem_handles_empty_data_error(self): self.reg_proj._call_api = mock.Mock() self.reg_proj._call_api.return_value = "\n", {} - df = self.reg_proj.export_fem(format='df') - self.assertTrue(df.empty) + dataframe = self.reg_proj.export_fem(format="df") + self.assertTrue(dataframe.empty) def test_export_metadata_handles_empty_data_error(self): self.reg_proj._call_api = mock.Mock() self.reg_proj._call_api.return_value = "\n", {} - df = self.reg_proj.export_metadata(format='df') - self.assertTrue(df.empty) + dataframe = self.reg_proj.export_metadata(format="df") + self.assertTrue(dataframe.empty) @responses.activate def test_date_formatting(self): @@ -639,26 +674,29 @@ def test_date_formatting(self): self.add_normalproject_response() def import_factory(date_string): - return [{'study_id': '1', - 'dob': date_string}] + return [{"study_id": "1", "dob": date_string}] # Default YMD with dashes - import_ymd = import_factory('2000-01-01') + import_ymd = import_factory("2000-01-01") response = self.reg_proj.import_records(import_ymd) - self.assertEqual(response['count'], 1) + self.assertEqual(response["count"], 1) # DMY with / - import_dmy = import_factory('31/01/2000') - response = self.reg_proj.import_records(import_dmy, date_format='DMY') - self.assertEqual(response['count'], 1) + import_dmy = import_factory("31/01/2000") + response = self.reg_proj.import_records(import_dmy, date_format="DMY") + self.assertEqual(response["count"], 1) - import_mdy = import_factory('12/31/2000') - response = self.reg_proj.import_records(import_mdy, date_format='MDY') - self.assertEqual(response['count'], 1) + import_mdy = import_factory("12/31/2000") + response = self.reg_proj.import_records(import_mdy, date_format="MDY") + 
self.assertEqual(response["count"], 1) def test_get_version(self): """Testing retrieval of REDCap version associated with Project""" - self.assertTrue(isinstance(semantic_version.Version('1.0.0'), type(self.long_proj.redcap_version))) + self.assertTrue( + isinstance( + semantic_version.Version("1.0.0"), type(self.long_proj.redcap_version) + ) + ) @responses.activate def test_export_checkbox_labels(self): @@ -666,26 +704,26 @@ def test_export_checkbox_labels(self): self.add_normalproject_response() self.assertEqual( self.reg_proj.export_records( - raw_or_label='label', - export_checkbox_labels=True)[0]['matcheck1___1'], - 'Foo' + raw_or_label="label", export_checkbox_labels=True + )[0]["matcheck1___1"], + "Foo", ) @responses.activate def test_export_always_include_def_field(self): - """ Ensure def_field always comes in the output even if not explicity - given in a requested form """ + """Ensure def_field always comes in the output even if not explicity + given in a requested form""" self.add_normalproject_response() # If we just ask for a form, must also get def_field in there - records = self.reg_proj.export_records(forms=['imaging']) + records = self.reg_proj.export_records(forms=["imaging"]) for record in records: self.assertIn(self.reg_proj.def_field, record) # , still need it def_field even if not asked for in form and fields - records = self.reg_proj.export_records(forms=['imaging'], fields=['foo_score']) + records = self.reg_proj.export_records(forms=["imaging"], fields=["foo_score"]) for record in records: self.assertIn(self.reg_proj.def_field, record) # If we just ask for some fields, still need def_field - records = self.reg_proj.export_records(fields=['foo_score']) + records = self.reg_proj.export_records(fields=["foo_score"]) for record in records: self.assertIn(self.reg_proj.def_field, record) @@ -693,22 +731,22 @@ def test_export_passes_filters_as_arrays(self): self.reg_proj._call_api = mock.Mock() self.reg_proj._call_api.return_value = (None, None) 
self.reg_proj.export_records( - records=['record0', 'record1', 'record2'], - fields=['field0', 'field1', 'field2'], - forms=['form0', 'form1', 'form2'], - events=['event0', 'event1', 'event2'] + records=["record0", "record1", "record2"], + fields=["field0", "field1", "field2"], + forms=["form0", "form1", "form2"], + events=["event0", "event1", "event2"], ) args, _ = self.reg_proj._call_api.call_args payload = args[0] - self.assertEqual(payload['records[0]'], 'record0') - self.assertEqual(payload['records[1]'], 'record1') - self.assertEqual(payload['records[2]'], 'record2') - self.assertEqual(payload['fields[1]'], 'field1') - self.assertEqual(payload['forms[2]'], 'form2') - self.assertEqual(payload['events[0]'], 'event0') + self.assertEqual(payload["records[0]"], "record0") + self.assertEqual(payload["records[1]"], "record1") + self.assertEqual(payload["records[2]"], "record2") + self.assertEqual(payload["fields[1]"], "field1") + self.assertEqual(payload["forms[2]"], "form2") + self.assertEqual(payload["events[0]"], "event0") @responses.activate def test_generate_next_record_name(self): @@ -726,4 +764,7 @@ def test_export_project_info(self): info = self.reg_proj.export_project_info() - self.assertEqual(info['project_id'], 123) + self.assertEqual(info["project_id"], 123) + + +# pylint: enable=too-many-public-methods diff --git a/test/test_request.py b/test/test_request.py index 5ae0375..c13d749 100644 --- a/test/test_request.py +++ b/test/test_request.py @@ -1,7 +1,9 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +"""Test low level API interface""" import unittest + import responses from redcap import RCRequest, RCAPIError @@ -12,56 +14,61 @@ class TestClass(unittest.TestCase): def setUp(self): """ We can use Kenneth Reitz's httpbin.org to test requests """ - self.url = 'http://httpbin.org' - self.base = {'token': '8E66DB6844D58E990075AFB51658A002', - 'format': 'json', - 'type': 'flat'} - self.badmd = {'token': 'B82CB05641E3BE8247E5F852EAFC5C21', - 'format': 
'json', - 'type': 'flat', - 'content': 'metadata'} + self.url = "http://httpbin.org" + self.base = { + "token": "8E66DB6844D58E990075AFB51658A002", + "format": "json", + "type": "flat", + } + self.badmd = { + "token": "B82CB05641E3BE8247E5F852EAFC5C21", + "format": "json", + "type": "flat", + "content": "metadata", + } def tearDown(self): pass def test_md_content(self): """Test that RCRequest throws correctly for malformed payloads""" - pl = self.base - ags = [self.url, pl, 'metadata'] + payload = self.base + ags = [self.url, payload, "metadata"] # no 'content' key self.assertRaises(RCAPIError, RCRequest, *ags) # wrong content - pl['content'] = 'blahblah' + payload["content"] = "blahblah" self.assertRaises(RCAPIError, RCRequest, *ags) # good content - pl['content'] = 'metadata' - r = RCRequest(*ags) - self.assertIsInstance(r, RCRequest) + payload["content"] = "metadata" + res = RCRequest(*ags) + self.assertIsInstance(res, RCRequest) @responses.activate def test_bad_md(self): """Test that newlines are appropriately dealt with""" - responses.add(responses.POST, 'https://redcap.vanderbilt.edu/api/') + responses.add(responses.POST, "https://redcap.vanderbilt.edu/api/") - args = ['https://redcap.vanderbilt.edu/api/', self.badmd, 'metadata'] - r = RCRequest(*args).execute() - self.assertTrue(r is not None) - self.assertTrue(len(r) > 0) + args = ["https://redcap.vanderbilt.edu/api/", self.badmd, "metadata"] + res = RCRequest(*args).execute() + self.assertTrue(res is not None) + self.assertTrue(len(res) > 0) def test_survey_participant_list_type(self): + """Test that partcipant list checks for proper content""" payload = { - 'token': 'foobar', - 'content': 'participantList', - 'format': 'json', - 'instrument': 'bar', + "token": "foobar", + "content": "participantList", + "format": "json", + "instrument": "bar", } - url = 'https://foobarbat.com' - typee = 'exp_survey_participant_list' + url = "https://foobarbat.com" + typee = "exp_survey_participant_list" # This should not 
raise RCRequest(url, payload, typee) # This should raise because of a different content - payload['content'] = 'foobar' + payload["content"] = "foobar" with self.assertRaises(RCAPIError): RCRequest(url, payload, typee)