| column | type | value range |
|---|---|---|
| instance_id | string | length 13–57 |
| patch | string | length 273–19.3k |
| repo | string | length 9–53 |
| base_commit | string | length 40 |
| hints_text | string | 1 distinct value |
| test_patch | string | length 212–195k |
| problem_statement | string | length 40–7.66k |
| version | string | 1 distinct value |
| environment_setup_commit | string | length 40 |
| FAIL_TO_PASS | list | length 1–144 |
| PASS_TO_PASS | list | length 0–1.46k |
| meta | dict | |
| created_at | string (date) | 2015-11-16 22:59:02 to 2024-04-24 11:36:26 |
| license | string | 7 distinct values |
| __index_level_0__ | int64 | 1–6.4k |
arviz-devs__arviz-1076
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bee1c84..16824f2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -36,6 +36,7 @@
* Fix `io_pymc3.py` to handle models with `potentials` (#1043)
* Fix several inconsistencies between schema and `from_pymc3` implementation
in groups `prior`, `prior_predictive` and `observed_data` (#1045)
+* Stabilize covariance matrix for `plot_kde_2d` (#1075)
### Deprecation
diff --git a/arviz/plots/backends/matplotlib/traceplot.py b/arviz/plots/backends/matplotlib/traceplot.py
index 437f925..e181417 100644
--- a/arviz/plots/backends/matplotlib/traceplot.py
+++ b/arviz/plots/backends/matplotlib/traceplot.py
@@ -1,5 +1,6 @@
"""Matplotlib traceplot."""
+import warnings
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import numpy as np
@@ -48,8 +49,8 @@ def plot_trace(
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE. Only affects continuous
variables.
- lines : tuple
- Tuple of (var_name, {'coord': selection}, [line, positions]) to be overplotted as
+ lines : tuple or list
+ list of tuple of (var_name, {'coord': selection}, [line_positions]) to be overplotted as
vertical lines on the density and horizontal lines on the trace.
combined : bool
Flag for combining multiple chains into a single line. If False (default), chains will be
@@ -124,6 +125,21 @@ def plot_trace(
_, axes = plt.subplots(len(plotters), 2, squeeze=False, figsize=figsize, **backend_kwargs)
+ # Check the input for lines
+ if lines is not None:
+ all_var_names = set(plotter[0] for plotter in plotters)
+
+ invalid_var_names = set()
+ for line in lines:
+ if line[0] not in all_var_names:
+ invalid_var_names.add(line[0])
+ if invalid_var_names:
+ warnings.warn(
+ "A valid var_name should be provided, found {} expected from {}".format(
+ invalid_var_names, all_var_names
+ )
+ )
+
for idx, (var_name, selection, value) in enumerate(plotters):
value = np.atleast_2d(value)
@@ -219,6 +235,10 @@ def plot_trace(
line_values = [vlines]
else:
line_values = np.atleast_1d(vlines).ravel()
+ if not np.issubdtype(line_values.dtype, np.number):
+ raise ValueError(
+ "line-positions should be numeric, found {}".format(line_values)
+ )
axes[idx, 0].vlines(line_values, *ylims[0], colors="black", linewidth=1.5, alpha=0.75)
axes[idx, 1].hlines(
line_values, *xlims[1], colors="black", linewidth=1.5, alpha=trace_kwargs["alpha"]
diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
index 3d1c189..a611401 100644
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -818,7 +818,9 @@ def _cov(data):
x -= avg[:, None]
prod = _dot(x, x.T.conj())
prod *= np.true_divide(1, ddof)
- return prod.squeeze()
+ prod = prod.squeeze()
+ prod += 1e-6 * np.eye(prod.shape[0])
+ return prod
else:
raise ValueError("{} dimension arrays are not supported".format(data.ndim))
|
arviz-devs/arviz
|
0eef3b95eff477541ba599f15687612652074b7e
|
diff --git a/arviz/tests/test_plots_matplotlib.py b/arviz/tests/test_plots_matplotlib.py
index a688a79..c0fa4ae 100644
--- a/arviz/tests/test_plots_matplotlib.py
+++ b/arviz/tests/test_plots_matplotlib.py
@@ -156,6 +156,21 @@ def test_plot_trace_max_subplots_warning(models):
assert axes.shape
[email protected]("kwargs", [{"var_names": ["mu", "tau"], "lines": [("hey", {}, [1])]}])
+def test_plot_trace_invalid_varname_warning(models, kwargs):
+ with pytest.warns(UserWarning, match="valid var.+should be provided"):
+ axes = plot_trace(models.model_1, **kwargs)
+ assert axes.shape
+
+
[email protected](
+ "bad_kwargs", [{"var_names": ["mu", "tau"], "lines": [("mu", {}, ["hey"])]}]
+)
+def test_plot_trace_bad_lines_value(models, bad_kwargs):
+ with pytest.raises(ValueError, match="line-positions should be numeric"):
+ plot_trace(models.model_1, **bad_kwargs)
+
+
@pytest.mark.parametrize("model_fits", [["model_1"], ["model_1", "model_2"]])
@pytest.mark.parametrize(
"args_expected",
@@ -701,7 +716,6 @@ def test_plot_posterior_point_estimates(models, point_estimate):
"kwargs", [{"insample_dev": False}, {"plot_standard_error": False}, {"plot_ic_diff": False}]
)
def test_plot_compare(models, kwargs):
-
model_compare = compare({"Model 1": models.model_1, "Model 2": models.model_2})
axes = plot_compare(model_compare, **kwargs)
|
plot_trace lines is unclear and it may yield unexpected results
**Describe the bug**
The argument `lines` for the function `plot_trace` can give unexpected results. Moreover, the documentation is a bit nebulous.
**To Reproduce**
A toy example is defined below:
```python
import pymc3 as pm
import arviz as az
import numpy as np
# fake data
mu_real = 0
sigma_real = 1
n_samples = 150
Y = np.random.normal(loc=mu_real, scale=sigma_real, size=n_samples)
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sigma=10)
sigma = pm.HalfNormal('sigma', sigma=10)
likelihood = pm.Normal('likelihood', mu=mu, sigma=sigma, observed=Y)
trace = pm.sample()
```
As per [documentation](https://arviz-devs.github.io/arviz/generated/arviz.plot_trace.html#arviz.plot_trace), the argument `lines` accepts a tuple in the form `(var_name, {‘coord’: selection}, [line, positions])`. So, the command
```python
az.plot_trace(trace, lines=(('mu', {}, mu_real),))
```
yields correctly

I can also pass a list of tuples or a list of tuples and lists and it will work fine:
```
az.plot_trace(trace, lines=[('mu', {}, mu_real)]) # list of tuples
az.plot_trace(trace, lines=[['mu', {}, mu_real]]) # list of lists
az.plot_trace(trace, lines=[['mu', {}, mu_real], ('sigma', {}, sigma_real)]) # list of lists and tuples
```
However, I cannot pass a simple tuple because I will get a `KeyError: 0`:
```python
az.plot_trace(trace, lines=(['mu', {}, mu_real]))
az.plot_trace(trace, lines=(('mu', {}, mu_real)))
```
Also, I can pass a variable or coordinate name that does not exist and Arviz will not complain, but no lines will be plotted (here I would expect a warning):
```python
az.plot_trace(trace, lines=[('hey', {}, mu_real)])
az.plot_trace(trace, lines=[('mu', {'hey'}, mu_real)])
```

The weird behavior happens when I pass a string:
```python
az.plot_trace(trace, lines=[('mu', {}, 'hey')])
```

**Expected behavior**
The [documentation](https://arviz-devs.github.io/arviz/generated/arviz.plot_trace.html#arviz.plot_trace) could be improved and the function could check its inputs. In addition to what is described above, the placeholder `[line, positions]` in `(var_name, {‘coord’: selection}, [line, positions])` should be something like `[line_positions]`, otherwise one may think (like myself :) ) that two values should be inserted (one for `line` and one for `positions`).
**Additional context**
I am using Win10 and a fresh conda environment with PyMC3 and Arviz from master.
Possibly related https://github.com/pymc-devs/pymc3/issues/3495, https://github.com/pymc-devs/pymc3/issues/3497
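Below is a minimal sketch of the input check this issue asks for, mirroring the validation added in the patch above (the helper name `_check_lines` and where it would be called from are illustrative, not ArviZ API):

```python
import warnings

import numpy as np


def _check_lines(lines, all_var_names):
    """Warn on unknown variable names and reject non-numeric line positions."""
    for var_name, _selection, positions in lines:
        if var_name not in all_var_names:
            warnings.warn(
                "A valid var_name should be provided, found {} expected from {}".format(
                    var_name, all_var_names
                )
            )
        values = np.atleast_1d(positions).ravel()
        if not np.issubdtype(values.dtype, np.number):
            raise ValueError(
                "line-positions should be numeric, found {}".format(values)
            )


# _check_lines([("hey", {}, [1])], {"mu", "sigma"}) warns about the unknown name;
# _check_lines([("mu", {}, ["hey"])], {"mu", "sigma"}) raises ValueError.
```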
|
0.0
|
0eef3b95eff477541ba599f15687612652074b7e
|
[
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_invalid_varname_warning[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_bad_lines_value[bad_kwargs0]"
] |
[
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_float[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_density_bad_kwargs",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace[kwargs9]",
"arviz/tests/test_plots_matplotlib.py::test_plot_trace_max_subplots_warning",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected0-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected0-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected1-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected1-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected2-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected2-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected3-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected3-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected4-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected4-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected5-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected5-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected6-model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest[args_expected6-model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_rope_exception",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_single_value",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_bad[model_fits0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_forest_bad[model_fits1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy[hist]",
"arviz/tests/test_plots_matplotlib.py::test_plot_energy_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_raises_valueerror",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[normal]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[minmax]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel[rank]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_parallel_exception[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_joint_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x0]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x1]",
"arviz/tests/test_plots_matplotlib.py::test_cov[x2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_cumulative[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_dist_2d_kde[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_quantiles[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_kde_inference_data",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_2var[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_divergences_warning[True]",
"arviz/tests/test_plots_matplotlib.py::test_plot_pair_divergences_warning[False]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-0.2-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[False-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-0.2-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc[True-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-0.1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[False-3-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-None-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-0.1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-1-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_multichain[True-3-scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_discrete[False-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_discrete[True-cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_grid",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad[scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[kde]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[cumulative]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_ax[scatter]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ppc_bad_ax",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin_ax",
"arviz/tests/test_plots_matplotlib.py::test_plot_violin_layout",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_short_chain",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_uncombined",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_combined",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[None]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[mu]",
"arviz/tests/test_plots_matplotlib.py::test_plot_autocorr_var_names[var_names2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_rank[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs8]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs9]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs10]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs11]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior[kwargs12]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_bad",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[mode]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[mean]",
"arviz/tests/test_plots_matplotlib.py::test_plot_posterior_point_estimates[median]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_hpd[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits0]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits1]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_scipy[limits2]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits0]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits1]",
"arviz/tests/test_plots_matplotlib.py::test_fast_kde_cumulative[limits2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_elpd_one_model",
"arviz/tests/test_plots_matplotlib.py::test_plot_khat_annotate",
"arviz/tests/test_plots_matplotlib.py::test_plot_khat_bad_input",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[local-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[quantile-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess[evolution-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[local-kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_local_quantile[quantile-kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_evolution",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_kind",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_coords[chain]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_bad_coords[draw]",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_no_sample_stats",
"arviz/tests/test_plots_matplotlib.py::test_plot_ess_no_divergences",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit[kwargs7]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_incompatible_args",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_loo_pit_label[args4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs0]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs1]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs2]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs3]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs4]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs5]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse[kwargs6]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_bad_coords[chain]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_bad_coords[draw]",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_no_sample_stats",
"arviz/tests/test_plots_matplotlib.py::test_plot_mcse_no_divergences"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-02-17 22:14:31+00:00
|
apache-2.0
| 1,099
|
|
pyca__bcrypt-86
|
diff --git a/.travis.yml b/.travis.yml
index 456aba7..bbcb336 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -30,6 +30,7 @@ matrix:
env: TOXENV=pypy CC=clang
- python: 2.7
env: TOXENV=pep8
+ - env: TOXENV=packaging
- python: 3.5
env: TOXENV=py3pep8
- language: generic
diff --git a/MANIFEST.in b/MANIFEST.in
index 622d66b..93a4480 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,11 @@
include LICENSE README.rst
+
+include tox.ini .coveragerc
include src/build_bcrypt.py
+
recursive-include src/_csrc *
+recursive-include tests *.py
+
+exclude requirements.txt tasks.py .travis.yml
+
+prune .travis
diff --git a/README.rst b/README.rst
index 0883286..3859bb8 100644
--- a/README.rst
+++ b/README.rst
@@ -40,6 +40,7 @@ Changelog
3.1.0
-----
* Added support for ``checkpw`` as another method of verifying a password.
+* Ensure that you get a ``$2y$`` hash when you input a ``$2y$`` salt.
3.0.0
-----
@@ -104,7 +105,7 @@ the work factor merely pass the desired number of rounds to
>>> hashed = bcrypt.hashpw(password, bcrypt.gensalt(14))
>>> # Check that a unhashed password matches one that has previously been
>>> # hashed
- >>> if bcrypt.hashpw(password, hashed) == hashed:
+ >>> if bcrypt.checkpw(password, hashed):
... print("It Matches!")
... else:
... print("It Does not Match :(")
diff --git a/src/bcrypt/__init__.py b/src/bcrypt/__init__.py
index d6acb84..abc9d75 100644
--- a/src/bcrypt/__init__.py
+++ b/src/bcrypt/__init__.py
@@ -39,10 +39,6 @@ __all__ = [
_normalize_re = re.compile(b"^\$2y\$")
-def _normalize_prefix(salt):
- return _normalize_re.sub(b"$2b$", salt)
-
-
def gensalt(rounds=12, prefix=b"2b"):
if prefix not in (b"2a", b"2b"):
raise ValueError("Supported prefixes are b'2a' or b'2b'")
@@ -75,7 +71,13 @@ def hashpw(password, salt):
# on $2a$, so we do it here to preserve compatibility with 2.0.0
password = password[:72]
- salt = _normalize_prefix(salt)
+ # When the original 8bit bug was found the original library we supported
+ # added a new prefix, $2y$, that fixes it. This prefix is exactly the same
+ # as the $2b$ prefix added by OpenBSD other than the name. Since the
+ # OpenBSD library does not support the $2y$ prefix, if the salt given to us
+ # is for the $2y$ prefix, we'll just mugne it so that it's a $2b$ prior to
+ # passing it into the C library.
+ original_salt, salt = salt, _normalize_re.sub(b"$2b$", salt)
hashed = _bcrypt.ffi.new("unsigned char[]", 128)
retval = _bcrypt.lib.bcrypt_hashpass(password, salt, hashed, len(hashed))
@@ -83,7 +85,13 @@ def hashpw(password, salt):
if retval != 0:
raise ValueError("Invalid salt")
- return _bcrypt.ffi.string(hashed)
+ # Now that we've gotten our hashed password, we want to ensure that the
+ # prefix we return is the one that was passed in, so we'll use the prefix
+ # from the original salt and concatenate that with the return value (minus
+ # the return value's prefix). This will ensure that if someone passed in a
+ # salt with a $2y$ prefix, that they get back a hash with a $2y$ prefix
+ # even though we munged it to $2b$.
+ return original_salt[:4] + _bcrypt.ffi.string(hashed)[4:]
def checkpw(password, hashed_password):
@@ -96,9 +104,6 @@ def checkpw(password, hashed_password):
"password and hashed_password may not contain NUL bytes"
)
- # If the user supplies a $2y$ prefix we normalize to $2b$
- hashed_password = _normalize_prefix(hashed_password)
-
ret = hashpw(password, hashed_password)
return _bcrypt.lib.timingsafe_bcmp(ret, hashed_password, len(ret)) == 0
diff --git a/tox.ini b/tox.ini
index abc6283..264d9aa 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = py26,py27,pypy,py33,py34,py35,pep8,py3pep8
+envlist = py26,py27,pypy,py33,py34,py35,pep8,py3pep8,packaging
[testenv]
# If you add a new dep here you probably need to add it in setup.py as well
@@ -27,6 +27,15 @@ deps =
commands =
flake8 .
+[testenv:packaging]
+deps =
+ check-manifest
+ readme_renderer
+commands =
+ check-manifest
+ python setup.py check --metadata --restructuredtext --strict
+
+
[flake8]
exclude = .tox,*.egg
select = E,W,F,N,I
|
pyca/bcrypt
|
c9c76210fad230995a6155287e8b92c49180eae4
|
diff --git a/tests/test_bcrypt.py b/tests/test_bcrypt.py
index 47f315a..d9bde72 100644
--- a/tests/test_bcrypt.py
+++ b/tests/test_bcrypt.py
@@ -152,12 +152,12 @@ _2y_test_vectors = [
(
b"\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
- b"$2b$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
+ b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq",
),
(
b"\xff\xff\xa3",
b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
- b"$2b$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
+ b"$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e",
),
]
|
Find a way to not use == in the README
I don't think it's actually exploitable as a timing attack (in fact I'm pretty sure it's not), but I think it'd be good hygiene to offer a check_password function or similar and use that, so we don't have to expose a general-purpose constant-time comparison function.
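The patch above updates the README to use `checkpw` instead of comparing hashes with `==`; a minimal usage sketch of that API:

```python
import bcrypt

password = b"super secret password"
hashed = bcrypt.hashpw(password, bcrypt.gensalt())

# checkpw re-hashes the candidate password and compares the result with a
# timing-safe comparison internally, so callers never compare hashes with ==.
if bcrypt.checkpw(password, hashed):
    print("It Matches!")
else:
    print("It Does not Match :(")
```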
|
0.0
|
c9c76210fad230995a6155287e8b92c49180eae4
|
[
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]"
] |
[
"tests/test_bcrypt.py::test_gensalt_basic",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[4-$2b$04$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[5-$2b$05$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[6-$2b$06$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[7-$2b$07$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[8-$2b$08$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[9-$2b$09$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[10-$2b$10$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[11-$2b$11$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[12-$2b$12$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[13-$2b$13$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[14-$2b$14$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[15-$2b$15$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[16-$2b$16$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[17-$2b$17$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[18-$2b$18$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[19-$2b$19$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[20-$2b$20$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[21-$2b$21$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[22-$2b$22$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[23-$2b$23$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_valid[24-$2b$24$KB.uKB.uKB.uKB.uKB.uK.]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[1]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[2]",
"tests/test_bcrypt.py::test_gensalt_rounds_invalid[3]",
"tests/test_bcrypt.py::test_gensalt_bad_prefix",
"tests/test_bcrypt.py::test_gensalt_2a_prefix",
"tests/test_bcrypt.py::test_hashpw_new[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_new[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_new[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_new[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_new[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_new[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_new[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_new[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_new[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_new[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_new[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_new[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_new[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_new[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_new[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_new[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_new[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_new[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_new[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_new[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_new[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_new[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_new[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_new[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_new[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_checkpw[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_checkpw[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_checkpw[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_checkpw[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_checkpw[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_checkpw[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_checkpw[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_checkpw[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_checkpw[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_checkpw[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_checkpw[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_checkpw[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_checkpw[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_checkpw[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_checkpw[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_checkpw[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_checkpw[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_checkpw[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_checkpw[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_checkpw[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_checkpw[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_checkpw[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_checkpw[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_checkpw[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_checkpw[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_hashpw_existing[Kk4DQuMMfZL9o-$2b$04$cVWp4XaNU8a4v1uMRum2SO-$2b$04$cVWp4XaNU8a4v1uMRum2SO026BWLIoQMD/TXg5uZV.0P.uO8m3YEm]",
"tests/test_bcrypt.py::test_hashpw_existing[9IeRXmnGxMYbs-$2b$04$pQ7gRO7e6wx/936oXhNjrO-$2b$04$pQ7gRO7e6wx/936oXhNjrOUNOHL1D0h1N2IDbJZYs.1ppzSof6SPy]",
"tests/test_bcrypt.py::test_hashpw_existing[xVQVbwa1S0M8r-$2b$04$SQe9knOzepOVKoYXo9xTte-$2b$04$SQe9knOzepOVKoYXo9xTteNYr6MBwVz4tpriJVe3PNgYufGIsgKcW]",
"tests/test_bcrypt.py::test_hashpw_existing[Zfgr26LWd22Za-$2b$04$eH8zX.q5Q.j2hO1NkVYJQO-$2b$04$eH8zX.q5Q.j2hO1NkVYJQOM6KxntS/ow3.YzVmFrE4t//CoF4fvne]",
"tests/test_bcrypt.py::test_hashpw_existing[Tg4daC27epFBE-$2b$04$ahiTdwRXpUG2JLRcIznxc.-$2b$04$ahiTdwRXpUG2JLRcIznxc.s1.ydaPGD372bsGs8NqyYjLY1inG5n2]",
"tests/test_bcrypt.py::test_hashpw_existing[xhQPMmwh5ALzW-$2b$04$nQn78dV0hGHf5wUBe0zOFu-$2b$04$nQn78dV0hGHf5wUBe0zOFu8n07ZbWWOKoGasZKRspZxtt.vBRNMIy]",
"tests/test_bcrypt.py::test_hashpw_existing[59je8h5Gj71tg-$2b$04$cvXudZ5ugTg95W.rOjMITu-$2b$04$cvXudZ5ugTg95W.rOjMITuM1jC0piCl3zF5cmGhzCibHZrNHkmckG]",
"tests/test_bcrypt.py::test_hashpw_existing[wT4fHJa2N9WSW-$2b$04$YYjtiq4Uh88yUsExO0RNTu-$2b$04$YYjtiq4Uh88yUsExO0RNTuEJ.tZlsONac16A8OcLHleWFjVawfGvO]",
"tests/test_bcrypt.py::test_hashpw_existing[uSgFRnQdOgm4S-$2b$04$WLTjgY/pZSyqX/fbMbJzf.-$2b$04$WLTjgY/pZSyqX/fbMbJzf.qxCeTMQOzgL.CimRjMHtMxd/VGKojMu]",
"tests/test_bcrypt.py::test_hashpw_existing[tEPtJZXur16Vg-$2b$04$2moPs/x/wnCfeQ5pCheMcu-$2b$04$2moPs/x/wnCfeQ5pCheMcuSJQ/KYjOZG780UjA/SiR.KsYWNrC7SG]",
"tests/test_bcrypt.py::test_hashpw_existing[vvho8C6nlVf9K-$2b$04$HrEYC/AQ2HS77G78cQDZQ.-$2b$04$HrEYC/AQ2HS77G78cQDZQ.r44WGcruKw03KHlnp71yVQEwpsi3xl2]",
"tests/test_bcrypt.py::test_hashpw_existing[5auCCY9by0Ruf-$2b$04$vVYgSTfB8KVbmhbZE/k3R.-$2b$04$vVYgSTfB8KVbmhbZE/k3R.ux9A0lJUM4CZwCkHI9fifke2.rTF7MG]",
"tests/test_bcrypt.py::test_hashpw_existing[GtTkR6qn2QOZW-$2b$04$JfoNrR8.doieoI8..F.C1O-$2b$04$JfoNrR8.doieoI8..F.C1OQgwE3uTeuardy6lw0AjALUzOARoyf2m]",
"tests/test_bcrypt.py::test_hashpw_existing[zKo8vdFSnjX0f-$2b$04$HP3I0PUs7KBEzMBNFw7o3O-$2b$04$HP3I0PUs7KBEzMBNFw7o3O7f/uxaZU7aaDot1quHMgB2yrwBXsgyy]",
"tests/test_bcrypt.py::test_hashpw_existing[I9VfYlacJiwiK-$2b$04$xnFVhJsTzsFBTeP3PpgbMe-$2b$04$xnFVhJsTzsFBTeP3PpgbMeMREb6rdKV9faW54Sx.yg9plf4jY8qT6]",
"tests/test_bcrypt.py::test_hashpw_existing[VFPO7YXnHQbQO-$2b$04$WQp9.igoLqVr6Qk70mz6xu-$2b$04$WQp9.igoLqVr6Qk70mz6xuRxE0RttVXXdukpR9N54x17ecad34ZF6]",
"tests/test_bcrypt.py::test_hashpw_existing[VDx5BdxfxstYk-$2b$04$xgZtlonpAHSU/njOCdKztO-$2b$04$xgZtlonpAHSU/njOCdKztOPuPFzCNVpB4LGicO4/OGgHv.uKHkwsS]",
"tests/test_bcrypt.py::test_hashpw_existing[dEe6XfVGrrfSH-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.-$2b$04$2Siw3Nv3Q/gTOIPetAyPr.GNj3aO0lb1E5E9UumYGKjP9BYqlNWJe]",
"tests/test_bcrypt.py::test_hashpw_existing[cTT0EAFdwJiLn-$2b$04$7/Qj7Kd8BcSahPO4khB8me-$2b$04$7/Qj7Kd8BcSahPO4khB8me4ssDJCW3r4OGYqPF87jxtrSyPj5cS5m]",
"tests/test_bcrypt.py::test_hashpw_existing[J8eHUDuxBB520-$2b$04$VvlCUKbTMjaxaYJ.k5juoe-$2b$04$VvlCUKbTMjaxaYJ.k5juoecpG/7IzcH1AkmqKi.lIZMVIOLClWAk.]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.E5YPO9kmyuRGyh0XouQYb4YMJKvyOeW]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*-$2a$05$CCCCCCCCCCCCCCCCCCCCC.-$2a$05$CCCCCCCCCCCCCCCCCCCCC.VGOzA784oUp/Z0DY336zx7pLYAy0lwK]",
"tests/test_bcrypt.py::test_hashpw_existing[U*U*U-$2a$05$XXXXXXXXXXXXXXXXXXXXXO-$2a$05$XXXXXXXXXXXXXXXXXXXXXOAcXxm9kjPGEMsLznoKqmqw7tc8WCx4a]",
"tests/test_bcrypt.py::test_hashpw_existing[0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789chars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa\\xaachars",
"tests/test_bcrypt.py::test_hashpw_existing[\\xa3-$2a$05$/OK.fbVrR/bpIqNJ5ianF.-$2a$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq-$2y$05$/OK.fbVrR/bpIqNJ5ianF.Sa7shbm4.OzKpvFnX1pQLmQW96oUlCq]",
"tests/test_bcrypt.py::test_checkpw_2y_prefix[\\xff\\xff\\xa3-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e-$2y$05$/OK.fbVrR/bpIqNJ5ianF.CE5elHaaO4EbggVDjb8P19RukzXSM3e]",
"tests/test_bcrypt.py::test_hashpw_invalid",
"tests/test_bcrypt.py::test_checkpw_wrong_password",
"tests/test_bcrypt.py::test_checkpw_bad_salt",
"tests/test_bcrypt.py::test_checkpw_str_password",
"tests/test_bcrypt.py::test_checkpw_str_salt",
"tests/test_bcrypt.py::test_hashpw_str_password",
"tests/test_bcrypt.py::test_hashpw_str_salt",
"tests/test_bcrypt.py::test_checkpw_nul_byte",
"tests/test_bcrypt.py::test_hashpw_nul_byte",
"tests/test_bcrypt.py::test_kdf[4-password-salt-[\\xbf\\x0c\\xc2\\x93X\\x7f\\x1c65U\\'ye\\x98\\xd4~W\\x90q\\xbfB~\\x9d\\x8f\\xbe\\x84*\\xba4\\xd9]",
"tests/test_bcrypt.py::test_kdf[4-password-\\x00-\\xc1+Vb5\\xee\\xe0L!%\\x98\\x97\\nW\\x9ag]",
"tests/test_bcrypt.py::test_kdf[4-\\x00-salt-`Q\\xbe\\x18\\xc2\\xf4\\xf8,\\xbf\\x0e\\xfe\\xe5G\\x1bK\\xb9]",
"tests/test_bcrypt.py::test_kdf[4-password\\x00-salt\\x00-t\\x10\\xe4L\\xf4\\xfa\\x07\\xbf\\xaa\\xc8\\xa9(\\xb1r\\x7f\\xac\\x00\\x13u\\xe7\\xbfs\\x847\\x0fH\\xef\\xd1!t0P]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00wor-sa\\x00l-\\xc2\\xbf\\xfd\\x9d\\xb3\\x8fei\\xef\\xefCr\\xf4\\xde\\x83\\xc0]",
"tests/test_bcrypt.py::test_kdf[4-pass\\x00word-sa\\x00lt-K\\xa4\\xac9%\\xc0\\xe8\\xd7\\xf0\\xcd\\xb6\\xbb\\x16\\x84\\xa5o]",
"tests/test_bcrypt.py::test_kdf[8-password-salt-\\xe16~\\xc5\\x15\\x1a3\\xfa\\xacL\\xc1\\xc1D\\xcd#\\xfa\\x15\\xd5T\\x84\\x93\\xec\\xc9\\x9b\\x9b]\\x9c\\r;'\\xbe\\xc7b'\\xeaf\\x08\\x8b\\x84\\x9b",
"tests/test_bcrypt.py::test_kdf[42-password-salt-\\x83<\\xf0\\xdc\\xf5m\\xb6V\\x08\\xe8\\xf0\\xdc\\x0c\\xe8\\x82\\xbd]",
"tests/test_bcrypt.py::test_kdf[8-Lorem",
"tests/test_bcrypt.py::test_kdf[8-\\r\\xb3\\xac\\x94\\xb3\\xeeS(OJ\"\\x89;<$\\xae-:b\\xf0\\xf0\\xdb\\xce\\xf8#\\xcf\\xcc\\x85HV\\xea\\x10(-",
"tests/test_bcrypt.py::test_kdf[8-\\xe1\\xbd\\x88\\xce\\xb4\\xcf\\x85\\xcf\\x83\\xcf\\x83\\xce\\xb5\\xcf\\x8d\\xcf\\x82-\\xce\\xa4\\xce\\xb7\\xce\\xbb\\xce\\xad\\xce\\xbc\\xce\\xb1\\xcf\\x87\\xce\\xbf\\xcf\\x82-Cfl\\x9b\\t\\xef3\\xed\\x8c'\\xe8\\xe8\\xf3\\xe2\\xd8\\xe6]",
"tests/test_bcrypt.py::test_kdf_str_password",
"tests/test_bcrypt.py::test_kdf_str_salt",
"tests/test_bcrypt.py::test_invalid_params[pass-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[password-salt-10-10-TypeError]",
"tests/test_bcrypt.py::test_invalid_params[-$2b$04$cVWp4XaNU8a4v1uMRum2SO-10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password--10-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-0-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO--3-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-513-10-ValueError]",
"tests/test_bcrypt.py::test_invalid_params[password-$2b$04$cVWp4XaNU8a4v1uMRum2SO-20-0-ValueError]",
"tests/test_bcrypt.py::test_bcrypt_assert",
"tests/test_bcrypt.py::test_2a_wraparound_bug"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-06-30 16:21:32+00:00
|
apache-2.0
| 4,712
|
|
adamboche__python-marshmallow-union-33
|
diff --git a/src/marshmallow_union/__init__.py b/src/marshmallow_union/__init__.py
index 22d5ef4..ee47920 100644
--- a/src/marshmallow_union/__init__.py
+++ b/src/marshmallow_union/__init__.py
@@ -13,6 +13,7 @@ class MarshmallowUnionException(Exception):
class ExceptionGroup(MarshmallowUnionException):
"""Collection of possibly multiple exceptions."""
+
def __init__(self, msg: str, errors):
self.msg = msg
self.errors = errors
@@ -63,8 +64,8 @@ class Union(marshmallow.fields.Field):
for candidate_field in fields:
try:
- return candidate_field.serialize(
- attr, obj, error_store=error_store, **kwargs
+ return candidate_field._serialize(
+ value, attr, obj, error_store=error_store, **kwargs
)
except ValueError as e:
error_store.store_error({attr: e})
|
adamboche/python-marshmallow-union
|
58bfc9fb069e00478afba87da3e003464cbdaebe
|
diff --git a/tests/test_union.py b/tests/test_union.py
index 6377c06..c93f004 100644
--- a/tests/test_union.py
+++ b/tests/test_union.py
@@ -52,6 +52,14 @@ class IntStrSchema(marshmallow.Schema):
x = marshmallow_union.Union([marshmallow.fields.Int(), marshmallow.fields.String()])
+class ListUnionSchema(marshmallow.Schema):
+ """Schema with a list of unions."""
+
+ l = marshmallow.fields.List(
+ marshmallow_union.Union([marshmallow.fields.Int(), marshmallow.fields.String()])
+ )
+
+
@pytest.mark.parametrize(
"data, schema",
[
@@ -59,6 +67,7 @@ class IntStrSchema(marshmallow.Schema):
({"name": "Alice", "number_or_numbers": [25, 50]}, PersonSchema()),
({"name": "Alice", "number_or_numbers": [25, 50]}, OtherSchema()),
({"x": 5}, IntStrSchema()),
+ ({"l": ["h", 5, "n", 1]}, ListUnionSchema()),
({"x": "hello"}, IntStrSchema()),
({"items": {"a": 42, "b": [17]}}, MappingSchema()),
],
|
_serialize ignores the given value
The `_serialize` method of `Union` calls the `serialize` method (instead of `_serialize`) on the underlying fields. This means that it ignores the given `value` parameter, and may try to serialize a completely different value.
See: https://github.com/adamboche/python-marshmallow-union/blob/master/src/marshmallow_union/__init__.py#L66-L68
Initially reported in: https://github.com/lovasoa/marshmallow_dataclass/issues/67
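A minimal reproduction, matching the `ListUnionSchema` added in the test patch above (assumes `marshmallow` and `marshmallow_union` are installed); wrapping the union in a `List` is what exposes the ignored `value` parameter:

```python
import marshmallow
import marshmallow_union


class ListUnionSchema(marshmallow.Schema):
    """Schema with a list of unions."""

    l = marshmallow.fields.List(
        marshmallow_union.Union(
            [marshmallow.fields.Int(), marshmallow.fields.String()]
        )
    )


data = {"l": ["h", 5, "n", 1]}
# Before the fix, Union._serialize calls serialize() on its candidate fields,
# which re-reads the whole attribute from the parent object instead of using
# the element value that List passes in, so this dump misbehaves.
print(ListUnionSchema().dump(data))
```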
|
0.0
|
58bfc9fb069e00478afba87da3e003464cbdaebe
|
[
"tests/test_union.py::test_round_trip[data4-schema4]"
] |
[
"tests/test_union.py::test_round_trip[data0-schema0]",
"tests/test_union.py::test_round_trip[data1-schema1]",
"tests/test_union.py::test_round_trip[data2-schema2]",
"tests/test_union.py::test_round_trip[data3-schema3]",
"tests/test_union.py::test_round_trip[data5-schema5]",
"tests/test_union.py::test_round_trip[data6-schema6]",
"tests/test_union.py::test_load_raises[data0-schema0]",
"tests/test_union.py::test_load_raises[data1-schema1]",
"tests/test_union.py::test_load_raises[data2-schema2]",
"tests/test_union.py::test_dump_raises[data0-schema0]",
"tests/test_union.py::test_dump_raises[data1-schema1]",
"tests/test_union.py::test_dump_raises[data2-schema2]"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-23 13:45:41+00:00
|
mit
| 882
|
|
tylerwince__pydbg-4
|
diff --git a/.travis.yml b/.travis.yml
index e1b9f62..0b6f2c8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -10,7 +10,7 @@ python:
- "nightly"
# command to install dependencies
install:
- - pip install pydbg
+ - pip install -e .
# command to run tests
script:
- pytest -vv
diff --git a/pydbg.py b/pydbg.py
index 3695ef8..a5b06df 100644
--- a/pydbg.py
+++ b/pydbg.py
@@ -27,7 +27,10 @@ def dbg(exp):
for i in reversed(inspect.stack()):
if "dbg" in i.code_context[0]:
var_name = i.code_context[0][
- i.code_context[0].find("(") + 1 : i.code_context[0].find(")")
+ i.code_context[0].find("(")
+ + 1 : len(i.code_context[0])
+ - 1
+ - i.code_context[0][::-1].find(")")
]
print(f"[{i.filename}:{i.lineno}] {var_name} = {exp}")
break
|
tylerwince/pydbg
|
e29ce20677f434dcf302c2c6f7bd872d26d4f13b
|
diff --git a/tests/test_pydbg.py b/tests/test_pydbg.py
index 5b12508..bf91ea4 100644
--- a/tests/test_pydbg.py
+++ b/tests/test_pydbg.py
@@ -4,9 +4,6 @@ from pydbg import dbg
from contextlib import redirect_stdout
-def something():
- pass
-
cwd = os.getcwd()
def test_variables():
@@ -23,12 +20,19 @@ def test_variables():
dbg(strType)
dbg(boolType)
dbg(NoneType)
+ dbg(add(1, 2))
- want = f"""[{cwd}/tests/test_pydbg.py:21] intType = 2
-[{cwd}/tests/test_pydbg.py:22] floatType = 2.1
-[{cwd}/tests/test_pydbg.py:23] strType = mystring
-[{cwd}/tests/test_pydbg.py:24] boolType = True
-[{cwd}/tests/test_pydbg.py:25] NoneType = None
+ want = f"""[{cwd}/tests/test_pydbg.py:18] intType = 2
+[{cwd}/tests/test_pydbg.py:19] floatType = 2.1
+[{cwd}/tests/test_pydbg.py:20] strType = mystring
+[{cwd}/tests/test_pydbg.py:21] boolType = True
+[{cwd}/tests/test_pydbg.py:22] NoneType = None
+[{cwd}/tests/test_pydbg.py:23] add(1, 2) = 3
"""
- assert out.getvalue() == want
+ assert out.getvalue() == want
+
+
+def add(x, y):
+ return x + y
+
|
Parentheses aren't displayed correctly for functions
The closing parenthesis of the function call is missing.
```
In [19]: def add(x, y):
...: z = x + y
...: return z
...:
In [20]: dbg(add(1, 2))
[<ipython-input-20-ac7b28727082>:1] add(1, 2 = 3
```
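A small illustration of the slicing bug using plain string operations (not the pydbg API): the original code stops at the first `)` in the source line, while the fix effectively stops at the last one.

```python
line = "dbg(add(1, 2))"

# Buggy extraction: slice up to the *first* closing paren.
print(line[line.find("(") + 1 : line.find(")")])   # add(1, 2

# Fixed extraction: slice up to the *last* closing paren, which is what the
# reversed-find arithmetic in the patch computes.
print(line[line.find("(") + 1 : line.rfind(")")])  # add(1, 2)
```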
|
0.0
|
e29ce20677f434dcf302c2c6f7bd872d26d4f13b
|
[
"tests/test_pydbg.py::test_variables"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-01-21 17:37:47+00:00
|
mit
| 6,138
|
|
serge-sans-paille__gast-50
|
diff --git a/gast/ast2.py b/gast/ast2.py
index 0e0a8ed..8968b5b 100644
--- a/gast/ast2.py
+++ b/gast/ast2.py
@@ -40,6 +40,17 @@ class Ast2ToGAst(AstToGAst):
new_node.end_lineno = new_node.end_col_offset = None
return new_node
+ def visit_Assign(self, node):
+ new_node = gast.Assign(
+ self._visit(node.targets),
+ self._visit(node.value),
+ None, # type_comment
+ )
+
+ gast.copy_location(new_node, node)
+ new_node.end_lineno = new_node.end_col_offset = None
+ return new_node
+
def visit_For(self, node):
new_node = gast.For(
self._visit(node.target),
@@ -278,6 +289,15 @@ class GAstToAst2(GAstToAst):
ast.copy_location(new_node, node)
return new_node
+ def visit_Assign(self, node):
+ new_node = ast.Assign(
+ self._visit(node.targets),
+ self._visit(node.value),
+ )
+
+ ast.copy_location(new_node, node)
+ return new_node
+
def visit_For(self, node):
new_node = ast.For(
self._visit(node.target),
diff --git a/gast/ast3.py b/gast/ast3.py
index 2d56fca..d0f0d39 100644
--- a/gast/ast3.py
+++ b/gast/ast3.py
@@ -15,6 +15,17 @@ class Ast3ToGAst(AstToGAst):
def visit_Index(self, node):
return self._visit(node.value)
+ def visit_Assign(self, node):
+ new_node = gast.Assign(
+ self._visit(node.targets),
+ self._visit(node.value),
+ None, # type_comment
+ )
+
+ gast.copy_location(new_node, node)
+ new_node.end_lineno = new_node.end_col_offset = None
+ return new_node
+
if sys.version_info.minor < 8:
def visit_Module(self, node):
new_node = gast.Module(
@@ -247,6 +258,15 @@ class GAstToAst3(GAstToAst):
ast.copy_location(new_node, node)
return new_node
+ def visit_Assign(self, node):
+ new_node = ast.Assign(
+ self._visit(node.targets),
+ self._visit(node.value),
+ )
+
+ ast.copy_location(new_node, node)
+ return new_node
+
if sys.version_info.minor < 8:
def visit_Module(self, node):
diff --git a/gast/gast.py b/gast/gast.py
index 317d4b1..00150ca 100644
--- a/gast/gast.py
+++ b/gast/gast.py
@@ -61,7 +61,7 @@ _nodes = (
('Delete', (('targets',),
('lineno', 'col_offset', 'end_lineno', 'end_col_offset',),
(stmt,))),
- ('Assign', (('targets', 'value',),
+ ('Assign', (('targets', 'value', 'type_comment'),
('lineno', 'col_offset', 'end_lineno', 'end_col_offset',),
(stmt,))),
('AugAssign', (('target', 'op', 'value',),
|
serge-sans-paille/gast
|
365fa06e88f38c14cdff53d1ae437851a411e7a6
|
diff --git a/tests/test_compat.py b/tests/test_compat.py
index e944647..c04c433 100644
--- a/tests/test_compat.py
+++ b/tests/test_compat.py
@@ -61,7 +61,8 @@ class CompatTestCase(unittest.TestCase):
compile(gast.gast_to_ast(tree), '<test>', 'exec')
norm = ("Module(body=[Assign(targets=[Name(id='e', ctx=Store()"
", annotation=None, type_comment=None"
- ")], value=Constant(value=1, kind=None)), Expr(value="
+ ")], value=Constant(value=1, kind=None), "
+ "type_comment=None), Expr(value="
"JoinedStr(values=[FormattedValue(value=Name(id='e', "
"ctx=Load(), annotation=None, type_comment=None), "
"conversion=-1, format_spec=None)]))], "
@@ -74,7 +75,8 @@ class CompatTestCase(unittest.TestCase):
compile(gast.gast_to_ast(tree), '<test>', 'exec')
norm = ("Module(body=[Assign(targets=[Name(id='e', ctx=Store()"
", annotation=None, type_comment=None"
- ")], value=Constant(value=1, kind=None)), Expr(value="
+ ")], value=Constant(value=1, kind=None), "
+ "type_comment=None), Expr(value="
"JoinedStr(values=[Constant(value='e = ', kind=None), "
"FormattedValue(value=Name(id='e', ctx=Load(), "
"annotation=None, type_comment=None), "
|
ast.Assign needs type_comment in Python 3.8
In Python 3.8.5, I use gast to modify an AST node and then convert it back to `ast` with `gast_to_ast`, but the result differs from the tree produced by `ast` directly.
It works in Python 3.5 and Python 2.7.
The example code:
```python
import ast
import gast
import textwrap
import unittest
def code_gast_ast(source):
"""
Transform source_code into gast.Node and modify it,
then back to ast.Node.
"""
source = textwrap.dedent(source)
root = gast.parse(source)
new_root = GastNodeTransformer(root).apply()
ast_root = gast.gast_to_ast(new_root)
return ast.dump(ast_root)
def code_ast(source):
"""
Transform source_code into ast.Node, then dump it.
"""
source = textwrap.dedent(source)
root = ast.parse(source)
return ast.dump(root)
class GastNodeTransformer(gast.NodeTransformer):
def __init__(self, root):
self.root = root
def apply(self):
return self.generic_visit(self.root)
def visit_Name(self, node):
"""
Param in func is ast.Name in PY2, but ast.arg in PY3.
It will be generally represented by gast.Name in gast.
"""
if isinstance(node.ctx, gast.Param) and node.id != "self":
node.id += '_new'
return node
class TestPythonCompatibility(unittest.TestCase):
def _check_compatibility(self, source, target):
source_dump = code_gast_ast(source)
target_dump = code_ast(target)
self.assertEqual(source_dump, target_dump)
def test_call(self):
source = """
y = foo(*arg)
"""
target = """
y = foo(*arg_new)
"""
self._check_compatibility(source, target)
# source_dump gast-> ast
# Module(body=[Assign(targets=[Name(id='y', ctx=Store())], value=Call(func=Name(id='foo', ctx=Load()), args=[Starred(value=Name(id='arg_new', ctx=Load()), ctx=Load())], keywords=[]))], type_ignores=[])
# target_dump ast
# Module(body=[Assign(targets=[Name(id='y', ctx=Store())], value=Call(func=Name(id='foo', ctx=Load()), args=[Starred(value=Name(id='arg_new', ctx=Load()), ctx=Load())], keywords=[]), type_comment=None)], type_ignores=[])
```
After I modified the definition in `gast.py`, it works in Python 3.8
from
```
('Assign', (('targets', 'value',),
('lineno', 'col_offset', 'end_lineno', 'end_col_offset',),
(stmt,))),
```
into
```
('Assign', (('targets', 'value','type_comment'),
('lineno', 'col_offset', 'end_lineno', 'end_col_offset',),
(stmt,))),
```
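For reference, a minimal round-trip check (a sketch assuming Python 3.8 and the public gast helpers used above) that exposes the mismatch:
```python
import ast
import gast

src = "y = 1\n"

# Compare a direct ast parse against a gast round-trip. Before gast's Assign
# gained a type_comment field, the direct dump showed "type_comment=None" on
# Assign while the round-tripped dump omitted it, so the two strings differed.
direct = ast.dump(ast.parse(src))
roundtrip = ast.dump(gast.gast_to_ast(gast.parse(src)))
print(direct == roundtrip)
```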
|
0.0
|
365fa06e88f38c14cdff53d1ae437851a411e7a6
|
[
"tests/test_compat.py::CompatTestCase::test_FormattedValue",
"tests/test_compat.py::CompatTestCase::test_JoinedStr"
] |
[
"tests/test_compat.py::CompatTestCase::test_ArgAnnotation",
"tests/test_compat.py::CompatTestCase::test_Call",
"tests/test_compat.py::CompatTestCase::test_Ellipsis",
"tests/test_compat.py::CompatTestCase::test_ExtSlice",
"tests/test_compat.py::CompatTestCase::test_ExtSliceEllipsis",
"tests/test_compat.py::CompatTestCase::test_ExtSlices",
"tests/test_compat.py::CompatTestCase::test_Index",
"tests/test_compat.py::CompatTestCase::test_KeywordOnlyArgument",
"tests/test_compat.py::CompatTestCase::test_PosonlyArgs",
"tests/test_compat.py::CompatTestCase::test_Raise",
"tests/test_compat.py::CompatTestCase::test_TryExcept",
"tests/test_compat.py::CompatTestCase::test_TryExceptNamed",
"tests/test_compat.py::CompatTestCase::test_TryFinally",
"tests/test_compat.py::CompatTestCase::test_TypeIgnore",
"tests/test_compat.py::CompatTestCase::test_With",
"tests/test_compat.py::CompatTestCase::test_keyword_argument",
"tests/test_compat.py::CompatTestCase::test_star_argument"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-05 20:10:22+00:00
|
bsd-3-clause
| 5,480
|
|
scottstanie__sentineleof-11
|
diff --git a/eof/cli.py b/eof/cli.py
index e5a2432..948c96c 100644
--- a/eof/cli.py
+++ b/eof/cli.py
@@ -35,7 +35,13 @@ from eof import log
type=click.Choice(["S1A", "S1B"]),
help="Optionally specify Sentinel satellite to download (default: gets both S1A and S1B)",
)
-def cli(search_path, save_dir, sentinel_file, date, mission):
[email protected](
+ "--use-scihub",
+ is_flag=True,
+ default=False,
+ help="Use SciHub as primary provider to download orbits (default: False)",
+)
+def cli(search_path, save_dir, sentinel_file, date, mission, use_scihub):
"""Download Sentinel precise orbit files.
Saves files to `save-dir` (default = current directory)
@@ -51,4 +57,5 @@ def cli(search_path, save_dir, sentinel_file, date, mission):
sentinel_file=sentinel_file,
mission=mission,
date=date,
+ use_scihub=use_scihub,
)
diff --git a/eof/download.py b/eof/download.py
index ab06273..b9db51e 100644
--- a/eof/download.py
+++ b/eof/download.py
@@ -45,7 +45,8 @@ PRECISE_ORBIT = "POEORB"
RESTITUTED_ORBIT = "RESORB"
-def download_eofs(orbit_dts=None, missions=None, sentinel_file=None, save_dir="."):
+def download_eofs(orbit_dts=None, missions=None, sentinel_file=None, save_dir=".",
+ use_scihub: bool = False):
"""Downloads and saves EOF files for specific dates
Args:
@@ -54,6 +55,8 @@ def download_eofs(orbit_dts=None, missions=None, sentinel_file=None, save_dir=".
No input downloads both, must be same len as orbit_dts
sentinel_file (str): path to Sentinel-1 filename to download one .EOF for
save_dir (str): directory to save the EOF files into
+ use_scihub (bool): use SciHub to download orbits
+ (if False SciHUb is used only as a fallback)
Returns:
list[str]: all filenames of saved orbit files
@@ -76,18 +79,51 @@ def download_eofs(orbit_dts=None, missions=None, sentinel_file=None, save_dir=".
# First make sures all are datetimes if given string
orbit_dts = [parse(dt) if isinstance(dt, str) else dt for dt in orbit_dts]
- # Download and save all links in parallel
- pool = ThreadPool(processes=MAX_WORKERS)
- result_dt_dict = {
- pool.apply_async(_download_and_write, (mission, dt, save_dir)): dt
- for mission, dt in zip(missions, orbit_dts)
- }
filenames = []
- for result in result_dt_dict:
- cur_filenames = result.get()
- dt = result_dt_dict[result]
- logger.info("Finished {}, saved to {}".format(dt.date(), cur_filenames))
- filenames.extend(cur_filenames)
+
+ if not use_scihub:
+ # Download and save all links in parallel
+ pool = ThreadPool(processes=MAX_WORKERS)
+ result_dt_dict = {
+ pool.apply_async(_download_and_write, (mission, dt, save_dir)): dt
+ for mission, dt in zip(missions, orbit_dts)
+ }
+
+ for result in result_dt_dict:
+ cur_filenames = result.get()
+ if cur_filenames is None:
+ use_scihub = True
+ continue
+ dt = result_dt_dict[result]
+ logger.info("Finished {}, saved to {}".format(dt.date(), cur_filenames))
+ filenames.extend(cur_filenames)
+
+ if use_scihub:
+ # try to search on scihub
+ from .scihubclient import ScihubGnssClient
+ client = ScihubGnssClient()
+ query = {}
+ if sentinel_file:
+ query.update(client.query_orbit_for_product(sentinel_file))
+ else:
+ for mission, dt in zip(missions, orbit_dts):
+ result = client.query_orbit(dt, dt + timedelta(days=1),
+ mission, product_type='AUX_POEORB')
+ if result:
+ query.update(result)
+ else:
+ # try with RESORB
+ result = client.query_orbit(dt, dt + timedelta(minutes=1),
+ mission,
+ product_type='AUX_RESORB')
+ query.update(result)
+
+ if query:
+ result = client.download_all(query)
+ filenames.extend(
+ item['path'] for item in result.downloaded.values()
+ )
+
return filenames
@@ -299,7 +335,8 @@ def find_scenes_to_download(search_path="./", save_dir="./"):
return orbit_dts, missions
-def main(search_path=".", save_dir=",", sentinel_file=None, mission=None, date=None):
+def main(search_path=".", save_dir=",", sentinel_file=None, mission=None, date=None,
+ use_scihub: bool = False):
"""Function used for entry point to download eofs"""
if not os.path.exists(save_dir):
@@ -331,4 +368,5 @@ def main(search_path=".", save_dir=",", sentinel_file=None, mission=None, date=N
missions=missions,
sentinel_file=sentinel_file,
save_dir=save_dir,
+ use_scihub=use_scihub,
)
diff --git a/eof/scihubclient.py b/eof/scihubclient.py
new file mode 100644
index 0000000..9060ea8
--- /dev/null
+++ b/eof/scihubclient.py
@@ -0,0 +1,143 @@
+"""sentinelsat based client to get orbit files form scihub.copernicu.eu."""
+
+
+import re
+import logging
+import datetime
+import operator
+import collections
+from typing import NamedTuple, Sequence
+
+from .products import Sentinel as S1Product
+
+from sentinelsat import SentinelAPI
+
+
+_log = logging.getLogger(__name__)
+
+
+DATE_FMT = '%Y%m%dT%H%M%S'
+
+
+class ValidityError(ValueError):
+ pass
+
+
+class ValidityInfo(NamedTuple):
+ product_id: str
+ generation_date: datetime.datetime
+ start_validity: datetime.datetime
+ stop_validity: datetime.datetime
+
+
+def get_validity_info(products: Sequence[str],
+ pattern=None) -> Sequence[ValidityInfo]:
+ if pattern is None:
+ # use a generic pattern
+ pattern = re.compile(
+ r'S1\w+_(?P<generation_date>\d{8}T\d{6})_'
+ r'V(?P<start_validity>\d{8}T\d{6})_'
+ r'(?P<stop_validity>\d{8}T\d{6})\w*')
+
+ keys = ('generation_date', 'start_validity', 'stop_validity')
+ out = []
+ for product_id in products:
+ mobj = pattern.match(product_id)
+ if mobj:
+ validity_data = {
+ name: datetime.datetime.strptime(mobj.group(name), DATE_FMT)
+ for name in keys
+ }
+ out.append(ValidityInfo(product_id, **validity_data))
+ else:
+ raise ValueError(
+ f'"{product_id}" does not math the regular expression '
+ f'for validity')
+
+ return out
+
+
+def lastval_cover(t0: datetime.datetime, t1: datetime.datetime,
+ data: Sequence[ValidityInfo]) -> str:
+ candidates = [
+ item for item in data
+ if item.start_validity <= t0 and item.stop_validity >= t1
+ ]
+ if not candidates:
+ raise ValidityError(
+ f'none of the input products completely covers the requested '
+ f'time interval: [t0={t0}, t1={t1}]')
+
+ candidates.sort(key=operator.attrgetter('generation_date'), reverse=True)
+
+ return candidates[0].product_id
+
+
+class OrbitSelectionError(RuntimeError):
+ pass
+
+
+class ScihubGnssClient:
+ T0 = datetime.timedelta(days=1)
+ T1 = datetime.timedelta(days=1)
+
+ def __init__(self, user: str = "gnssguest", password: str = "gnssguest",
+ api_url: str = "https://scihub.copernicus.eu/gnss/",
+ **kwargs):
+ self._api = SentinelAPI(user=user, password=password, api_url=api_url,
+ **kwargs)
+
+ def query_orbit(self, t0, t1, satellite_id: str,
+ product_type: str = 'AUX_POEORB'):
+ assert satellite_id in {'S1A', 'S1B'}
+ assert product_type in {'AUX_POEORB', 'AUX_RESORB'}
+
+ query_padams = dict(
+ producttype=product_type,
+ platformserialidentifier=satellite_id[1:],
+ date=[t0, t1],
+ )
+ _log.debug('query parameter: %s', query_padams)
+ products = self._api.query(**query_padams)
+ return products
+
+ @staticmethod
+ def _select_orbit(products, t0, t1):
+ orbit_products = [p['identifier'] for p in products.values()]
+ validity_info = get_validity_info(orbit_products)
+ product_id = lastval_cover(t0, t1, validity_info)
+ return collections.OrderedDict(
+ (k, v) for k, v in products.items()
+ if v['identifier'] == product_id
+ )
+
+ def query_orbit_for_product(self, product,
+ product_type: str = 'AUX_POEORB',
+ t0_margin: datetime.timedelta = T0,
+ t1_margin: datetime.timedelta = T1):
+ if isinstance(product, str):
+ product = S1Product(product)
+
+ t0 = product.start_time
+ t1 = product.stop_time
+
+ products = self.query_orbit(t0 - t0_margin, t1 + t1_margin,
+ satellite_id=product.mission,
+ product_type=product_type)
+ return self._select_orbit(products, t0, t1)
+
+ def download(self, uuid, **kwargs):
+ """Download a single orbit product.
+
+ See sentinelsat.SentinelAPI.download for a detailed description
+ of arguments.
+ """
+ return self._api.download(uuid, **kwargs)
+
+ def download_all(self, products, **kwargs):
+ """Download all the specified orbit products.
+
+ See sentinelsat.SentinelAPI.download_all for a detailed description
+ of arguments.
+ """
+ return self._api.download_all(products, **kwargs)
diff --git a/requirements.txt b/requirements.txt
index e5ecbfe..18eaad6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
python-dateutil==2.5.1
requests>=2.20.0
click==6.7
+sentinelsat>=1.0
diff --git a/setup.py b/setup.py
index fd829be..e82e843 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@ setuptools.setup(
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Programming Language :: C",
"License :: OSI Approved :: MIT License",
"Topic :: Scientific/Engineering",
@@ -29,6 +30,7 @@ setuptools.setup(
"requests",
"click",
"python-dateutil",
+ "sentinelsat >= 1.0",
],
entry_points={
"console_scripts": [
|
scottstanie/sentineleof
|
59aba730a2f07bba25351abe51017bdaa4085c10
|
diff --git a/eof/tests/test_eof.py b/eof/tests/test_eof.py
index e89c9be..6ac9b68 100644
--- a/eof/tests/test_eof.py
+++ b/eof/tests/test_eof.py
@@ -159,4 +159,5 @@ class TestEOF(unittest.TestCase):
orbit_dts=["20200101"],
sentinel_file=None,
save_dir=",",
+ use_scihub=False,
)
|
https://qc.sentinel1.eo.esa.int discontinued
The old S1-QC site has been discontinued.
The alternative service https://qc.sentinel1.copernicus.eu should have the same API, but it seems that it currently does not provide orbit files.
The recommended alternative is https://scihub.copernicus.eu/gnss which has a different API and requires authentication.
See also https://github.com/johntruckenbrodt/pyroSAR/pull/130.
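For illustration, a minimal sketch of querying the GNSS hub with sentinelsat (the guest credentials and query keywords mirror the client added in the patch above; treat them as assumptions, not a stable contract):
```python
import datetime
from sentinelsat import SentinelAPI

# Anonymous guest account for the GNSS hub.
api = SentinelAPI("gnssguest", "gnssguest", "https://scihub.copernicus.eu/gnss/")

t0 = datetime.datetime(2021, 4, 1)
t1 = t0 + datetime.timedelta(days=1)

# Query precise orbit files (AUX_POEORB) for Sentinel-1A over one day,
# then download them to the current directory.
products = api.query(
    producttype="AUX_POEORB",
    platformserialidentifier="1A",
    date=[t0, t1],
)
api.download_all(products)
```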
|
0.0
|
59aba730a2f07bba25351abe51017bdaa4085c10
|
[
"eof/tests/test_eof.py::TestEOF::test_mission"
] |
[
"eof/tests/test_eof.py::TestEOF::test_find_scenes_to_download",
"eof/tests/test_eof.py::TestEOF::test_download_eofs_errors",
"eof/tests/test_eof.py::TestEOF::test_main_error_args",
"eof/tests/test_eof.py::TestEOF::test_main_nothing_found"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-04-30 19:13:02+00:00
|
mit
| 5,387
|
|
OnroerendErfgoed__skosprovider_rdf-119
|
diff --git a/HISTORY.rst b/HISTORY.rst
index b0e2a7b..3c5a727 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,3 +1,9 @@
+1.3.0 (??-12-2022)
+------------------
+
+- Don't export local id as dcterms.identifier when it's equal to the URI (#117)
+- Add support for Python 3.10 and Python 3.11 (#120)
+
1.2.0 (11-10-2022)
------------------
diff --git a/setup.py b/setup.py
index b97c9d9..b917507 100644
--- a/setup.py
+++ b/setup.py
@@ -24,7 +24,7 @@ requires = [
setup(
name='skosprovider_rdf',
- version='1.2.0',
+ version='1.3.0',
description='skosprovider_rdf',
long_description=README + '\n\n' + HISTORY,
long_description_content_type='text/x-rst',
diff --git a/skosprovider_rdf/utils.py b/skosprovider_rdf/utils.py
index ea19f7b..1914a80 100644
--- a/skosprovider_rdf/utils.py
+++ b/skosprovider_rdf/utils.py
@@ -142,7 +142,8 @@ def _add_c(graph, provider, id):
c = provider.get_by_id(id)
subject = URIRef(c.uri)
_add_in_dataset(graph, subject, provider)
- graph.add((subject, DCTERMS.identifier, Literal(c.id)))
+ if c.id != c.uri:
+ graph.add((subject, DCTERMS.identifier, Literal(c.id)))
conceptscheme = URIRef(provider.concept_scheme.uri)
graph.add((subject, SKOS.inScheme, conceptscheme))
_add_labels(graph, c, subject)
|
OnroerendErfgoed/skosprovider_rdf
|
9a68fb35e971caac8d7df45e6371f3132a85e9f2
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 86b9f11..446b318 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -2,6 +2,7 @@ import pytest
from rdflib import Graph
from rdflib import Namespace
from rdflib.namespace import DCTERMS
+from rdflib.namespace import DC
from rdflib.namespace import RDF
from rdflib.namespace import SKOS
from rdflib.term import Literal
@@ -255,6 +256,26 @@ class TestRDFDumperProducts:
graph_dump = utils.rdf_dumper(products_provider)
assert isinstance(graph_dump, Graph)
+ def test_dump_rdf_no_uri_as_local_identifier(self, products_provider, caplog):
+ caplog.set_level(logging.DEBUG)
+ graph_dump = utils.rdf_dumper(products_provider)
+
+ prod_uri = 'http://www.prodcuts.com/Product'
+ jewel_uri = 'http://www.products.com/Jewellery'
+ perfume_uri = 'http://www.products.com/Perfume'
+
+ prod = URIRef(prod_uri)
+ jewel = URIRef(jewel_uri)
+ perfume = URIRef(perfume_uri)
+
+ log.debug(graph_dump.serialize(format='turtle'))
+
+ assert (prod, DCTERMS.identifier, Literal(prod_uri)) not in graph_dump
+ assert (prod, DC.identifier, Literal(prod_uri)) not in graph_dump
+ assert (jewel, DCTERMS.identifier, Literal(jewel_uri)) not in graph_dump
+ assert (jewel, DC.identifier, Literal(jewel_uri)) not in graph_dump
+ assert (perfume, DCTERMS.identifier, Literal(perfume_uri)) not in graph_dump
+ assert (perfume, DC.identifier, Literal(perfume_uri)) not in graph_dump
class TestRDFDumperTrees:
|
Don't export local id as dcterms.identifier when it's the URI
When reading a SKOS file, we assign the URI to the id attribute because our skosprovider expects an internal identifier. But when exporting the provider to SKOS again, we export this id as dcterms:identifier. This adds information to the SKOS file that isn't really needed. Solution: when exporting to SKOS, if the id is equal to the uri, don't generate dcterms:identifier statements.
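A minimal sketch of the proposed rule with rdflib (the `Concept` tuple is a stand-in for a skosprovider concept, not the real class):
```python
from collections import namedtuple

from rdflib import Graph, Literal, URIRef
from rdflib.namespace import DCTERMS

Concept = namedtuple("Concept", ["id", "uri"])

def add_identifier(graph: Graph, c: Concept) -> None:
    # Only emit dcterms:identifier when the local id carries information
    # beyond the URI itself.
    if c.id != c.uri:
        graph.add((URIRef(c.uri), DCTERMS.identifier, Literal(c.id)))

g = Graph()
add_identifier(g, Concept(id="http://www.products.com/Jewellery",
                          uri="http://www.products.com/Jewellery"))
print(len(g))  # 0 -- no redundant identifier triple
add_identifier(g, Concept(id="123", uri="http://www.products.com/Jewellery"))
print(len(g))  # 1
```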
|
0.0
|
9a68fb35e971caac8d7df45e6371f3132a85e9f2
|
[
"tests/test_utils.py::TestRDFDumperProducts::test_dump_rdf_no_uri_as_local_identifier"
] |
[
"tests/test_utils.py::TestRDFDumperMaterials::test_dump_dictionary_to_rdf",
"tests/test_utils.py::TestRDFDumperMaterials::test_dump_collections_roundtrip",
"tests/test_utils.py::TestRDFDumperMaterials::test_dump_concept_with_superordinates",
"tests/test_utils.py::TestRDFDumperProducts::test_dump_rdf_to_rdf",
"tests/test_utils.py::TestRDFDumperProducts::test_dump_rdf_compare_type",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_tree_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_larch_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_chestnut_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_oak_to_rdf",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_one_id_to_rdf_and_reload",
"tests/test_utils.py::TestRDFDumperTrees::test_dump_conceptscheme_tree_to_rdf"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-15 10:18:13+00:00
|
mit
| 403
|
|
scrapinghub__web-poet-142
|
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index c88ec58..5ab56e4 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -2,6 +2,15 @@
Changelog
=========
+TBR
+---
+
+* Fix the error when calling :meth:`.to_item() <web_poet.pages.ItemPage.to_item>`,
+ :func:`item_from_fields_sync() <web_poet.fields.item_from_fields_sync>`, or
+ :func:`item_from_fields() <web_poet.fields.item_from_fields>` on page objects
+ defined as slotted attrs classes, while setting ``skip_nonitem_fields=True``.
+
+
0.8.0 (2023-02-23)
------------------
diff --git a/web_poet/pages.py b/web_poet/pages.py
index 77b39af..fed2fae 100644
--- a/web_poet/pages.py
+++ b/web_poet/pages.py
@@ -50,21 +50,34 @@ class Returns(typing.Generic[ItemT]):
return get_item_cls(self.__class__, default=dict)
+_NOT_SET = object()
+
+
class ItemPage(Injectable, Returns[ItemT]):
"""Base Page Object, with a default :meth:`to_item` implementation
which supports web-poet fields.
"""
- _skip_nonitem_fields: bool
+ _skip_nonitem_fields = _NOT_SET
+
+ def _get_skip_nonitem_fields(self) -> bool:
+ value = self._skip_nonitem_fields
+ return False if value is _NOT_SET else bool(value)
- def __init_subclass__(cls, skip_nonitem_fields: bool = False, **kwargs):
+ def __init_subclass__(cls, skip_nonitem_fields=_NOT_SET, **kwargs):
super().__init_subclass__(**kwargs)
+ if skip_nonitem_fields is _NOT_SET:
+ # This is a workaround for attrs issue.
+ # See: https://github.com/scrapinghub/web-poet/issues/141
+ return
cls._skip_nonitem_fields = skip_nonitem_fields
async def to_item(self) -> ItemT:
"""Extract an item from a web page"""
return await item_from_fields(
- self, item_cls=self.item_cls, skip_nonitem_fields=self._skip_nonitem_fields
+ self,
+ item_cls=self.item_cls,
+ skip_nonitem_fields=self._get_skip_nonitem_fields(),
)
|
scrapinghub/web-poet
|
a369635f3d4cf1acc22acf967625fe51c8cd57ae
|
diff --git a/tests/test_pages.py b/tests/test_pages.py
index fa3cf8d..d54f094 100644
--- a/tests/test_pages.py
+++ b/tests/test_pages.py
@@ -3,7 +3,7 @@ from typing import Optional
import attrs
import pytest
-from web_poet import HttpResponse, field
+from web_poet import HttpResponse, PageParams, field
from web_poet.pages import (
Injectable,
ItemPage,
@@ -199,11 +199,27 @@ async def test_item_page_change_item_type_remove_fields() -> None:
class Subclass(BasePage, Returns[Item], skip_nonitem_fields=True):
pass
- page = Subclass()
+ # Same as above but a slotted attrs class with dependency.
+ # See: https://github.com/scrapinghub/web-poet/issues/141
+ @attrs.define
+ class SubclassWithDep(BasePage, Returns[Item], skip_nonitem_fields=True):
+ params: PageParams
+
+ # Check if flicking skip_nonitem_fields to False in the subclass works
+ @attrs.define
+ class SubclassSkipFalse(SubclassWithDep, Returns[Item], skip_nonitem_fields=False):
+ pass
+
+ for page in [Subclass(), SubclassWithDep(params=PageParams())]:
+ assert page.item_cls is Item
+ item = await page.to_item()
+ assert isinstance(item, Item)
+ assert item == Item(name="hello")
+
+ page = SubclassSkipFalse(params=PageParams())
assert page.item_cls is Item
- item = await page.to_item()
- assert isinstance(item, Item)
- assert item == Item(name="hello")
+ with pytest.raises(TypeError, match="unexpected keyword argument 'price'"):
+ await page.to_item()
# Item only contains "name", but not "price", but "price" should be passed
class SubclassStrict(BasePage, Returns[Item]):
|
`skip_nonitem_fields=True` doesn't work when the page object is an attrs class
Currently, this works fine:
```python
import attrs
from web_poet import HttpResponse, Returns, ItemPage, field
@attrs.define
class BigItem:
x: int
y: int
class BigPage(ItemPage[BigItem]):
@field
def x(self):
return 1
@field
def y(self):
return 2
@attrs.define
class SmallItem:
x: int
class SmallXPage(BigPage, Returns[SmallItem], skip_nonitem_fields=True):
pass
page = SmallXPage()
item = await page.to_item()
print(page._skip_nonitem_fields) # True
print(item) # SmallItem(x=1)
```
However, if we define an attrs class to have some page dependencies, it doesn't work:
```python
from web_poet import PageParams
@attrs.define
class SmallPage(BigPage, Returns[SmallItem], skip_nonitem_fields=True):
params: PageParams
page = SmallPage(params=PageParams())
print(page._skip_nonitem_fields) # False
item = await page.to_item() # TypeError: __init__() got an unexpected keyword argument 'y'
```
From the examples above, this stems from `page._skip_nonitem_fields` being set to `False` when the page object is defined as an attrs class.
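For context, a minimal standalone sketch (independent of web-poet, and assuming current attrs behaviour for slotted classes) of why the keyword is lost: `@attrs.define` rebuilds the class to add `__slots__`, and rebuilding re-runs `__init_subclass__` without the original class keyword, so the default value wins.
```python
import attrs

class Base:
    flag = False

    def __init_subclass__(cls, flag: bool = False, **kwargs):
        super().__init_subclass__(**kwargs)
        cls.flag = flag

@attrs.define
class Slotted(Base, flag=True):
    x: int = 0

# The slotted replacement class was created without flag=True,
# so __init_subclass__ reset the attribute to its default.
print(Slotted.flag)  # False
```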
|
0.0
|
a369635f3d4cf1acc22acf967625fe51c8cd57ae
|
[
"tests/test_pages.py::test_item_page_change_item_type_remove_fields"
] |
[
"tests/test_pages.py::test_page_object",
"tests/test_pages.py::test_web_page_object",
"tests/test_pages.py::test_item_web_page_deprecated",
"tests/test_pages.py::test_is_injectable",
"tests/test_pages.py::test_item_page_typed",
"tests/test_pages.py::test_web_page_fields",
"tests/test_pages.py::test_item_page_typed_subclass",
"tests/test_pages.py::test_item_page_fields_typo",
"tests/test_pages.py::test_item_page_required_field_missing",
"tests/test_pages.py::test_item_page_change_item_type_extra_fields"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-02-22 15:11:06+00:00
|
bsd-3-clause
| 5,398
|
|
chriskuehl__rustenv-12
|
diff --git a/rustenv.py b/rustenv.py
index ac82bb7..4cdbbc2 100644
--- a/rustenv.py
+++ b/rustenv.py
@@ -47,6 +47,7 @@ deactivate_rustenv() {{
unset _RUSTENV_BIN_PATH
unset _RUSTENV_OLD_PS1
unset _RUSTENV_OLD_PATH
+ unset -f deactivate_rustenv
}}
'''
|
chriskuehl/rustenv
|
b2f3ae3f74bb7407c18e59700112098394eac323
|
diff --git a/tests/integration_test.py b/tests/integration_test.py
index 72088d5..e1b3e5c 100644
--- a/tests/integration_test.py
+++ b/tests/integration_test.py
@@ -22,6 +22,7 @@ report() {{
echo "[[$1-rustc:$(rustc --version 2>&1)]]"
echo "[[$1-cargo:$(cargo --version 2>&1)]]"
echo "[[$1-hello:$(hello 2>&1)]]"
+ echo "[[$1-deactivate_rustenv:$(type deactivate_rustenv | head -1)]]"
}}
report start
|
`deactivate_rustenv` doesn't remove `deactivate_rustenv`
```console
(renv) asottile@babibox:/tmp
$ deactivate_rustenv
asottile@babibox:/tmp
$ which cargo
asottile@babibox:/tmp
$ deactivate_rustenv
ehco hi
bash: ehco: No such file or directory
echo hi
hi
```
|
0.0
|
b2f3ae3f74bb7407c18e59700112098394eac323
|
[
"tests/integration_test.py::test_runenv_shell[bash-{}:"
] |
[
"tests/integration_test.py::test_rustenv_looks_sane"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-08-13 23:11:19+00:00
|
apache-2.0
| 1,580
|
|
WPI-MMR__gym_solo-6
|
diff --git a/gym_solo/core/rewards.py b/gym_solo/core/rewards.py
new file mode 100644
index 0000000..d364f98
--- /dev/null
+++ b/gym_solo/core/rewards.py
@@ -0,0 +1,63 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import List
+
+from gym_solo import solo_types
+
+
+class Reward(ABC):
+ @abstractmethod
+ def compute(self) -> solo_types.reward:
+ """Compute the reward for the current state.
+
+ Returns:
+ solo_types.reward: The reward evalulated at the current state.
+ """
+ pass
+
+
+@dataclass
+class _WeightedReward:
+ reward: Reward
+ weight: float
+
+
+class RewardFactory:
+ """A factory to dynamically create rewards.
+
+ Note that this factory is currently implemented to combined rewards via
+ a linear combination. For example, if the user wanted to register rewards
+ r1, r2, and r3, the final reward would be r1 + r2 + r3.
+
+ Obviously, you can add coefficients to the rewards, and that functionality
+ is further explained in register_reward() below.
+
+ If you need more functionality then a linear combination (exponential
+ temporal decay), then it's probably in your best interest to implement that
+ in a custom Reward.
+ """
+ def __init__(self):
+ """Create a new RewardFactory."""
+ self._rewards: List[_WeightedReward] = []
+
+ def register_reward(self, weight: float, reward: Reward):
+ """Register a reward to be computed per state.
+
+ Args:
+ weight (float): The weight to be applied to this reward when it is
+ is combined linearly with the other rewards. The domain for this
+ value is (-∞, ∞).
+ reward (Reward): A Reward object which .compute() will be called on at
+ reward computation time.
+ """
+ self._rewards.append(_WeightedReward(reward=reward, weight=weight))
+
+ def get_reward(self) -> float:
+ """Evaluate the current state and get the combined reward.
+
+ Returns:
+ float: The reward from the current state. Note that this reward is a
+ combination of multiple atomic sub-rewards, as explained by the
+ strategies earlier.
+ """
+ return sum(wr.weight * wr.reward.compute() for wr in self._rewards)
\ No newline at end of file
diff --git a/gym_solo/solo_types.py b/gym_solo/solo_types.py
index 85dc7ae..365d7e8 100644
--- a/gym_solo/solo_types.py
+++ b/gym_solo/solo_types.py
@@ -3,4 +3,7 @@ from typing import List
import numpy as np
# A state observation
-obs = np.ndarray
\ No newline at end of file
+obs = np.ndarray
+
+# A reward after a step
+reward = float
\ No newline at end of file
|
WPI-MMR/gym_solo
|
e4bcb11fb1b18da997494d56acf95bdd512a076a
|
diff --git a/gym_solo/core/test_rewards_factory.py b/gym_solo/core/test_rewards_factory.py
new file mode 100644
index 0000000..fd70bea
--- /dev/null
+++ b/gym_solo/core/test_rewards_factory.py
@@ -0,0 +1,35 @@
+import unittest
+from gym_solo.core import rewards
+
+from parameterized import parameterized
+
+
+class TestReward(rewards.Reward):
+ def __init__(self, return_value):
+ self._return_value = return_value
+
+ def compute(self):
+ return self._return_value
+
+
+class TestRewardsFactory(unittest.TestCase):
+ def test_empty(self):
+ rf = rewards.RewardFactory()
+ self.assertListEqual(rf._rewards, [])
+
+ @parameterized.expand([
+ ('single', {1: 2.5}, 2.5),
+ ('two_happy', {1: 1, 2: 2}, 5),
+ ('0-weight', {0: 1, 2: 2}, 4),
+ ('negative-weight', {-1: 1, 2: 2}, 3),
+ ('three', {1: 1, 2: 2, 3: 3}, 14),
+ ])
+ def test_register_and_compute(self, name, rewards_dict, expected_reward):
+ rf = rewards.RewardFactory()
+ for weight, reward in rewards_dict.items():
+ rf.register_reward(weight, TestReward(reward))
+ self.assertEqual(rf.get_reward(), expected_reward)
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
|
Create Rewards Factory
Similar to the observation factory, the reward factory should be able to take in `Reward` objects (analogous to `Observation` objects), evaluate the rewards for the current state, and combine them.
I'm thinking of making the final reward a linear combination of the registered rewards. With that in mind, consider the following example:
```python
r1 = Reward()
r2 = Reward()
r3 = Reward()
rf = RewardFactory()
rf.register_reward(-1, r1)
rf.register_reward(.1, r2)
rf.register_reward(.9, r3)
```
Notice that `register_reward()` has two args: `weight: float` and `reward: gym_solo.core.rewards.Reward`. Thus, the final reward would evaluate to: `-r1() + 0.1 * r2() + 0.9 * r3()`. Figure that if you need functionality more elaborate than a linear combination, you should be offloading that processing into a `Reward` class.
@mahajanrevant thoughts on this? lmk if you think we need anything stronger than linear combinations in the `RewardFactory`.
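A minimal sketch of the linear-combination idea (names are illustrative, not the final gym_solo API):
```python
from dataclasses import dataclass
from typing import Callable, List

@dataclass
class WeightedReward:
    weight: float
    compute: Callable[[], float]

class RewardFactory:
    def __init__(self):
        self._rewards: List[WeightedReward] = []

    def register_reward(self, weight: float, compute: Callable[[], float]) -> None:
        self._rewards.append(WeightedReward(weight, compute))

    def get_reward(self) -> float:
        # Linear combination of all registered rewards.
        return sum(wr.weight * wr.compute() for wr in self._rewards)

rf = RewardFactory()
rf.register_reward(-1, lambda: 1.0)
rf.register_reward(0.1, lambda: 2.0)
rf.register_reward(0.9, lambda: 3.0)
print(rf.get_reward())  # -1*1.0 + 0.1*2.0 + 0.9*3.0 = 1.9
```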
|
0.0
|
e4bcb11fb1b18da997494d56acf95bdd512a076a
|
[
"gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_empty",
"gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_0_single",
"gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_1_two_happy",
"gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_2_0_weight",
"gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_3_negative_weight",
"gym_solo/core/test_rewards_factory.py::TestRewardsFactory::test_register_and_compute_4_three"
] |
[] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-27 21:14:37+00:00
|
mit
| 835
|
|
praw-dev__praw-1960
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 2d776d7d..b893f83f 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -6,6 +6,10 @@ PRAW follows `semantic versioning <http://semver.org/>`_.
Unreleased
----------
+**Fixed**
+
+- An issue with replying to a modmail conversation results in a error.
+
7.7.0 (2023/02/25)
------------------
diff --git a/praw/models/reddit/modmail.py b/praw/models/reddit/modmail.py
index 2bdb981c..3ee6f08d 100644
--- a/praw/models/reddit/modmail.py
+++ b/praw/models/reddit/modmail.py
@@ -257,9 +257,15 @@ class ModmailConversation(RedditBase):
response = self._reddit.post(
API_PATH["modmail_conversation"].format(id=self.id), data=data
)
- message_id = response["conversation"]["objIds"][-1]["id"]
- message_data = response["messages"][message_id]
- return self._reddit._objector.objectify(message_data)
+ if isinstance(response, dict):
+ # Reddit recently changed the response format, so we need to handle both in case they change it back
+ message_id = response["conversation"]["objIds"][-1]["id"]
+ message_data = response["messages"][message_id]
+ return self._reddit._objector.objectify(message_data)
+ else:
+ for message in response.messages:
+ if message.id == response.obj_ids[-1]["id"]:
+ return message
def unarchive(self):
"""Unarchive the conversation.
|
praw-dev/praw
|
8bf669309a1476fa995f99c87c895fadf5436563
|
diff --git a/tests/integration/cassettes/TestModmailConversation.test_reply__internal.json b/tests/integration/cassettes/TestModmailConversation.test_reply__internal.json
new file mode 100644
index 00000000..5d10d7e1
--- /dev/null
+++ b/tests/integration/cassettes/TestModmailConversation.test_reply__internal.json
@@ -0,0 +1,217 @@
+{
+ "http_interactions": [
+ {
+ "recorded_at": "2023-07-11T20:33:14",
+ "request": {
+ "body": {
+ "encoding": "utf-8",
+ "string": "grant_type=refresh_token&refresh_token=<REFRESH_TOKEN>"
+ },
+ "headers": {
+ "Accept": [
+ "*/*"
+ ],
+ "Accept-Encoding": [
+ "identity"
+ ],
+ "Authorization": [
+ "Basic <BASIC_AUTH>"
+ ],
+ "Connection": [
+ "close"
+ ],
+ "Content-Length": [
+ "82"
+ ],
+ "Content-Type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "User-Agent": [
+ "<USER_AGENT> PRAW/7.7.1.dev0 prawcore/2.3.0"
+ ]
+ },
+ "method": "POST",
+ "uri": "https://www.reddit.com/api/v1/access_token"
+ },
+ "response": {
+ "body": {
+ "encoding": "UTF-8",
+ "string": "{\"access_token\": \"<ACCESS_TOKEN>\", \"token_type\": \"bearer\", \"expires_in\": 86400, \"refresh_token\": \"<REFRESH_TOKEN>\", \"scope\": \"creddits modnote modcontributors modmail modconfig subscribe structuredstyles vote wikiedit mysubreddits submit modlog modposts modflair save modothers read privatemessages report identity livemanage account modtraffic wikiread edit modwiki modself history flair\"}"
+ },
+ "headers": {
+ "Accept-Ranges": [
+ "bytes"
+ ],
+ "Cache-Control": [
+ "private, max-age=3600"
+ ],
+ "Connection": [
+ "close"
+ ],
+ "Content-Length": [
+ "1527"
+ ],
+ "Date": [
+ "Tue, 11 Jul 2023 20:33:14 GMT"
+ ],
+ "NEL": [
+ "{\"report_to\": \"w3-reporting-nel\", \"max_age\": 14400, \"include_subdomains\": false, \"success_fraction\": 1.0, \"failure_fraction\": 1.0}"
+ ],
+ "Report-To": [
+ "{\"group\": \"w3-reporting-nel\", \"max_age\": 14400, \"include_subdomains\": true, \"endpoints\": [{ \"url\": \"https://w3-reporting-nel.reddit.com/reports\" }]}, {\"group\": \"w3-reporting\", \"max_age\": 14400, \"include_subdomains\": true, \"endpoints\": [{ \"url\": \"https://w3-reporting.reddit.com/reports\" }]}, {\"group\": \"w3-reporting-csp\", \"max_age\": 14400, \"include_subdomains\": true, \"endpoints\": [{ \"url\": \"https://w3-reporting-csp.reddit.com/reports\" }]}"
+ ],
+ "Server": [
+ "snooserv"
+ ],
+ "Set-Cookie": [
+ "edgebucket=lUgiOW4d9OwlvT1wgL; Domain=reddit.com; Max-Age=63071999; Path=/; secure"
+ ],
+ "Strict-Transport-Security": [
+ "max-age=31536000; includeSubdomains"
+ ],
+ "Vary": [
+ "accept-encoding"
+ ],
+ "Via": [
+ "1.1 varnish"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "1; mode=block"
+ ],
+ "content-type": [
+ "application/json; charset=UTF-8"
+ ],
+ "x-moose": [
+ "majestic"
+ ]
+ },
+ "status": {
+ "code": 200,
+ "message": "OK"
+ },
+ "url": "https://www.reddit.com/api/v1/access_token"
+ }
+ },
+ {
+ "recorded_at": "2023-07-11T20:33:14",
+ "request": {
+ "body": {
+ "encoding": "utf-8",
+ "string": "api_type=json&body=A+message&isAuthorHidden=False&isInternal=True"
+ },
+ "headers": {
+ "Accept": [
+ "*/*"
+ ],
+ "Accept-Encoding": [
+ "identity"
+ ],
+ "Authorization": [
+ "bearer <ACCESS_TOKEN>"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "65"
+ ],
+ "Content-Type": [
+ "application/x-www-form-urlencoded"
+ ],
+ "Cookie": [
+ "edgebucket=lUgiOW4d9OwlvT1wgL"
+ ],
+ "User-Agent": [
+ "<USER_AGENT> PRAW/7.7.1.dev0 prawcore/2.3.0"
+ ]
+ },
+ "method": "POST",
+ "uri": "https://oauth.reddit.com/api/mod/conversations/1mahha?raw_json=1"
+ },
+ "response": {
+ "body": {
+ "encoding": "UTF-8",
+ "string": "{\"conversation\": {\"isAuto\": false, \"participant\": {\"isMod\": true, \"isAdmin\": false, \"name\": \"<USERNAME>\", \"isOp\": true, \"isParticipant\": true, \"isApproved\": false, \"isHidden\": false, \"id\": 561251419067, \"isDeleted\": false}, \"objIds\": [{\"id\": \"2bjn0i\", \"key\": \"messages\"}, {\"id\": \"2bjn0k\", \"key\": \"messages\"}, {\"id\": \"2bjn2z\", \"key\": \"messages\"}], \"isRepliable\": true, \"lastUserUpdate\": null, \"isInternal\": false, \"lastModUpdate\": \"2023-07-11T20:33:14.617000+0000\", \"authors\": [{\"isMod\": true, \"isAdmin\": false, \"name\": \"<USERNAME>\", \"isOp\": true, \"isParticipant\": true, \"isApproved\": false, \"isHidden\": false, \"id\": 561251419067, \"isDeleted\": false}], \"lastUpdated\": \"2023-07-11T20:33:14.617000+0000\", \"participantSubreddit\": {}, \"legacyFirstMessageId\": \"1wttevy\", \"state\": 1, \"conversationType\": \"sr_user\", \"lastUnread\": \"2023-07-11T00:00:00.000000+0000\", \"owner\": {\"displayName\": \"<TEST_SUBREDDIT>\", \"type\": \"subreddit\", \"id\": \"t5_29ey0j\"}, \"subject\": \"test\", \"id\": \"1mahha\", \"isHighlighted\": false, \"numMessages\": 3}, \"participantSubreddit\": {}, \"messages\": {\"2bjn0k\": {\"body\": \"<!-- SC_OFF --><div class=\\\"md\\\"><p>additional test</p>\\n</div><!-- SC_ON -->\", \"author\": {\"name\": \"<USERNAME>\", \"isApproved\": false, \"isMod\": true, \"isAdmin\": false, \"isOp\": true, \"isParticipant\": true, \"isHidden\": false, \"id\": 561251419067, \"isDeleted\": false}, \"isInternal\": true, \"date\": \"2023-07-11T20:32:12.025000+0000\", \"bodyMarkdown\": \"additional test\", \"id\": \"2bjn0k\", \"participatingAs\": \"moderator\"}, \"2bjn0i\": {\"body\": \"<!-- SC_OFF --><div class=\\\"md\\\"><p>testing</p>\\n</div><!-- SC_ON -->\", \"author\": {\"name\": \"<USERNAME>\", \"isApproved\": false, \"isMod\": true, \"isAdmin\": false, \"isOp\": true, \"isParticipant\": true, \"isHidden\": true, \"id\": 561251419067, \"isDeleted\": false}, \"isInternal\": false, \"date\": \"2023-07-11T20:32:11.586000+0000\", \"bodyMarkdown\": \"testing\", \"id\": \"2bjn0i\", \"participatingAs\": \"moderator\"}, \"2bjn2z\": {\"body\": \"<!-- SC_OFF --><div class=\\\"md\\\"><p>A message</p>\\n</div><!-- SC_ON -->\", \"author\": {\"name\": \"<USERNAME>\", \"isApproved\": false, \"isMod\": true, \"isAdmin\": false, \"isOp\": true, \"isParticipant\": true, \"isHidden\": false, \"id\": 561251419067, \"isDeleted\": false}, \"isInternal\": true, \"date\": \"2023-07-11T20:33:14.617000+0000\", \"bodyMarkdown\": \"A message\", \"id\": \"2bjn2z\", \"participatingAs\": \"moderator\"}}, \"user\": {\"recentComments\": {\"t1_i6yklz7\": {\"comment\": \"test reply\", \"date\": \"2022-05-01T22:37:21.936000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/uflrmv/test_post/i6yklz7/\", \"title\": \"Test post\"}}, \"muteStatus\": {\"muteCount\": 0, \"isMuted\": false, \"endDate\": null, \"reason\": \"\"}, \"name\": \"<USERNAME>\", \"created\": \"2020-07-04T21:34:49.063000+00:00\", \"banStatus\": {\"endDate\": null, \"reason\": \"\", \"isBanned\": false, \"isPermanent\": false}, \"isSuspended\": false, \"approveStatus\": {\"isApproved\": false}, \"isShadowBanned\": false, \"recentPosts\": {\"t3_z3wwe8\": {\"date\": \"2022-11-24T22:47:02.992000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3wwe8/test_post/\", \"title\": \"Test post\"}, \"t3_z4lkt4\": {\"date\": \"2022-11-25T19:16:07.058000+00:00\", \"permalink\": 
\"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z4lkt4/test_post/\", \"title\": \"Test post\"}, \"t3_z3x0le\": {\"date\": \"2022-11-24T22:52:25.348000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3x0le/test_post/\", \"title\": \"Test post\"}, \"t3_z3xa9p\": {\"date\": \"2022-11-24T23:04:17.179000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3xa9p/test_post/\", \"title\": \"Test post\"}, \"t3_z3wslj\": {\"date\": \"2022-11-24T22:42:19.611000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3wslj/test_post/\", \"title\": \"Test post\"}, \"t3_z3wtr9\": {\"date\": \"2022-11-24T22:43:43.212000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3wtr9/test_post/\", \"title\": \"Test post\"}, \"t3_z3wv0z\": {\"date\": \"2022-11-24T22:45:18.381000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3wv0z/test_post/\", \"title\": \"Test post\"}, \"t3_z3x7gi\": {\"date\": \"2022-11-24T23:00:51.261000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3x7gi/test_post/\", \"title\": \"Test post\"}, \"t3_z3x64t\": {\"date\": \"2022-11-24T22:59:35.632000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/z3x64t/test_post/\", \"title\": \"Test post\"}, \"t3_14lt78w\": {\"date\": \"2023-06-29T02:57:46.846000+00:00\", \"permalink\": \"https://www.reddit.com/r/<TEST_SUBREDDIT>/comments/14lt78w/hi/\", \"title\": \"hi\"}}, \"recentConvos\": {\"fjhla\": {\"date\": \"2020-07-16T01:15:55.263000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/fjhla\", \"id\": \"fjhla\", \"subject\": \"Spam\"}, \"1magps\": {\"date\": \"2023-07-11T20:18:46.102000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/1magps\", \"id\": \"1magps\", \"subject\": \"test\"}, \"1magq3\": {\"date\": \"2023-07-11T20:28:57.787000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/1magq3\", \"id\": \"1magq3\", \"subject\": \"test\"}, \"1l7pjk\": {\"date\": \"2023-06-25T17:16:07.135000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/1l7pjk\", \"id\": \"1l7pjk\", \"subject\": \"invitation to moderate /r/<TEST_SUBREDDIT>\"}, \"1mahha\": {\"date\": \"2023-07-11T20:33:14.617000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/1mahha\", \"id\": \"1mahha\", \"subject\": \"test\"}, \"19u06q\": {\"date\": \"2022-11-20T19:21:19.387000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/19u06q\", \"id\": \"19u06q\", \"subject\": \"invitation to moderate /r/<TEST_SUBREDDIT>\"}, \"1mahgy\": {\"date\": \"2023-07-11T20:32:00.840000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/1mahgy\", \"id\": \"1mahgy\", \"subject\": \"test\"}, \"fjhnq\": {\"date\": \"2020-07-16T01:15:07.219000+0000\", \"permalink\": \"https://mod.reddit.com/mail/perma/fjhnq\", \"id\": \"fjhnq\", \"subject\": \"Spam\"}}, \"id\": \"t2_75u2lqkb\"}, \"modActions\": {}}"
+ },
+ "headers": {
+ "Accept-Ranges": [
+ "bytes"
+ ],
+ "Connection": [
+ "keep-alive"
+ ],
+ "Content-Length": [
+ "5735"
+ ],
+ "Date": [
+ "Tue, 11 Jul 2023 20:33:14 GMT"
+ ],
+ "NEL": [
+ "{\"report_to\": \"w3-reporting-nel\", \"max_age\": 14400, \"include_subdomains\": false, \"success_fraction\": 1.0, \"failure_fraction\": 1.0}"
+ ],
+ "Report-To": [
+ "{\"group\": \"w3-reporting-nel\", \"max_age\": 14400, \"include_subdomains\": true, \"endpoints\": [{ \"url\": \"https://w3-reporting-nel.reddit.com/reports\" }]}, {\"group\": \"w3-reporting\", \"max_age\": 14400, \"include_subdomains\": true, \"endpoints\": [{ \"url\": \"https://w3-reporting.reddit.com/reports\" }]}, {\"group\": \"w3-reporting-csp\", \"max_age\": 14400, \"include_subdomains\": true, \"endpoints\": [{ \"url\": \"https://w3-reporting-csp.reddit.com/reports\" }]}"
+ ],
+ "Server": [
+ "snooserv"
+ ],
+ "Strict-Transport-Security": [
+ "max-age=31536000; includeSubdomains"
+ ],
+ "Vary": [
+ "accept-encoding"
+ ],
+ "Via": [
+ "1.1 varnish"
+ ],
+ "X-Content-Type-Options": [
+ "nosniff"
+ ],
+ "X-Frame-Options": [
+ "SAMEORIGIN"
+ ],
+ "X-XSS-Protection": [
+ "1; mode=block"
+ ],
+ "cache-control": [
+ "private, s-maxage=0, max-age=0, must-revalidate, no-store"
+ ],
+ "content-type": [
+ "application/json; charset=UTF-8"
+ ],
+ "expires": [
+ "-1"
+ ],
+ "set-cookie": [
+ "loid=000000000075u2lqkb.2.1593898363221.Z0FBQUFBQmtyYnlLd2NOTXNkaDdPRzNNU3NVZkdtbVlKNndTaHk2bWs2NjI0NXlqdHZEZlhTWGVhWHU3UVBOODJ2Y28ydXJqNG5Ydll4a0ZqbGxrT3ZzWkl1d1QzUzdWLURhZnZSemsxSFNyeG1lMGlOSDM2NVF3akw1bHNpd3A0VnFPeEFxbjFzWWQ; Domain=reddit.com; Max-Age=63071999; Path=/; expires=Thu, 10-Jul-2025 20:33:14 GMT; secure",
+ "session_tracker=coofkkigherfoqbqdh.0.1689107594576.Z0FBQUFBQmtyYnlLdGM1Q1lGb2xvbFJWMUlnNUlHWDN4MjFZdldXZUhLUEZmT3NEdVAxVy1YTzd0c2EzQjNsZ215SEpxYlRXdlhMU0o1UWlpbHZreTBGNUdvd1lEOGI2RWNzZmRmMktFTzQ1S1R4bG0xcVRIMmFfVHpGYnc3dXBoRHhEUmtkLTk3TGg; Domain=reddit.com; Max-Age=7199; Path=/; expires=Tue, 11-Jul-2023 22:33:14 GMT; secure"
+ ],
+ "x-moose": [
+ "majestic"
+ ],
+ "x-ratelimit-remaining": [
+ "995"
+ ],
+ "x-ratelimit-reset": [
+ "406"
+ ],
+ "x-ratelimit-used": [
+ "1"
+ ]
+ },
+ "status": {
+ "code": 201,
+ "message": "Created"
+ },
+ "url": "https://oauth.reddit.com/api/mod/conversations/1mahha?raw_json=1"
+ }
+ }
+ ],
+ "recorded_with": "betamax/0.8.1"
+}
diff --git a/tests/integration/models/reddit/test_modmail.py b/tests/integration/models/reddit/test_modmail.py
index 88313df3..9875ff71 100644
--- a/tests/integration/models/reddit/test_modmail.py
+++ b/tests/integration/models/reddit/test_modmail.py
@@ -55,6 +55,12 @@ class TestModmailConversation(IntegrationTest):
reply = conversation.reply(body="A message")
assert isinstance(reply, ModmailMessage)
+ def test_reply__internal(self, reddit):
+ reddit.read_only = False
+ conversation = reddit.subreddit("all").modmail("1mahha")
+ reply = conversation.reply(internal=True, body="A message")
+ assert isinstance(reply, ModmailMessage)
+
def test_unarchive(self, reddit):
reddit.read_only = False
conversation = reddit.subreddit("all").modmail("ik72")
|
Modmail Conversations throw a TypeError when issuing a reply
### Describe the Bug
A TypeError is thrown for a modmail conversation when leaving an internal reply. The code we've been using to do this has been in place for months; the first observed error was at 2023-07-10 14:16:32 UTC.
This appears to occur whether the reply is internal or to the user.
It is worth noting that the reply goes through; the error occurs after submitting the reply.
### Desired Result
A new modmail conversation is created, then an internal moderator note is left.
### Code to reproduce the bug
```Python
testingSub = "" #your subreddit name
testingUser = "" #the user you're sending the modmail to
conversation = reddit.subreddit(testingSub).modmail.create(subject="test", body="testing", recipient=testingUser, author_hidden=True)
conversation.reply(internal=True, body="additional test") #this is where the error happens
```
### The `Reddit()` initialization in my code example does not include the following parameters to prevent credential leakage:
`client_secret`, `password`, or `refresh_token`.
- [X] Yes
### Relevant Logs
```Shell
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 4, in test
File "/<redacted>/lib/python3.8/site-packages/praw/util/deprecate_args.py", line 43, in wrapped
return func(**dict(zip(_old_args, args)), **kwargs)
File "/<redacted>/lib/python3.8/site-packages/praw/models/reddit/modmail.py", line 265, in reply
message_id = response["conversation"]["objIds"][-1]["id"]
```
### This code has previously worked as intended.
Yes
### Operating System/Environment
Ubuntu 20.04.4 (WSL), almalinux8
### Python Version
3.8.10, 3.9.13
### PRAW Version
3.7
### Prawcore Version
2.3.0
### Anything else?
_No response_
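A hedged sketch of handling both response shapes (it mirrors the fix in the patch above rather than documenting a stable PRAW API; the attribute names on the objectified response come from that patch):
```python
def extract_reply(reddit, response):
    # Reddit has returned both a raw dict and an already-objectified
    # conversation here; handle both so a format change does not break reply().
    if isinstance(response, dict):
        message_id = response["conversation"]["objIds"][-1]["id"]
        return reddit._objector.objectify(response["messages"][message_id])
    for message in response.messages:
        if message.id == response.obj_ids[-1]["id"]:
            return message
```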
|
0.0
|
8bf669309a1476fa995f99c87c895fadf5436563
|
[
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_reply__internal"
] |
[
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_archive",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_highlight",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_mute",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_mute_duration",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_read",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_read__other_conversations",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_reply",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_unarchive",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_unhighlight",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_unmute",
"tests/integration/models/reddit/test_modmail.py::TestModmailConversation::test_unread"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-11 20:39:06+00:00
|
bsd-2-clause
| 4,648
|
|
chapinb__chickadee-63
|
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 104b977..fc87bab 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
# Changelog
+## 20200805.0
+
+### Fixed
+
+* Addressed bug in rate limiting VirusTotal. [Issue-62](https://github.com/chapinb/chickadee/issues/62)
+
## 20200802.0
### Added
diff --git a/doc_src/source/conf.py b/doc_src/source/conf.py
index ad6c81a..31e098a 100644
--- a/doc_src/source/conf.py
+++ b/doc_src/source/conf.py
@@ -22,7 +22,7 @@ copyright = 'MIT 2020, Chapin Bryce'
author = 'Chapin Bryce'
# The full version, including alpha/beta/rc tags
-release = '20200802'
+release = '20200805'
# -- General configuration ---------------------------------------------------
diff --git a/libchickadee/__init__.py b/libchickadee/__init__.py
index 202fa79..e7393a1 100644
--- a/libchickadee/__init__.py
+++ b/libchickadee/__init__.py
@@ -145,7 +145,7 @@ library from the command line.
"""
__author__ = 'Chapin Bryce'
-__date__ = 20200802
-__version__ = 20200802.0
+__date__ = 20200805
+__version__ = 20200805.0
__license__ = 'MIT Copyright 2020 Chapin Bryce'
__desc__ = '''Yet another GeoIP resolution tool.'''
diff --git a/libchickadee/chickadee.py b/libchickadee/chickadee.py
index fd12d15..d43deac 100644
--- a/libchickadee/chickadee.py
+++ b/libchickadee/chickadee.py
@@ -65,7 +65,7 @@ Usage
-V, --version Displays version
-l LOG, --log LOG Path to log file (default: chickadee.log)
- Built by Chapin Bryce, v.20200801.0
+ Built by Chapin Bryce, v.20200805.0
.. _chickadee-examples:
@@ -171,7 +171,7 @@ from libchickadee.parsers.evtx import EVTXParser
__author__ = 'Chapin Bryce'
-__date__ = 20200407.2
+__date__ = 20200805
__license__ = 'GPLv3 Copyright 2019 Chapin Bryce'
__desc__ = '''Yet another GeoIP resolution tool.
@@ -672,7 +672,7 @@ def arg_handling(args):
help='Include debug log messages')
parser.add_argument('-V', '--version', action='version',
help='Displays version',
- version=str(__date__))
+ version=str(__version__))
parser.add_argument(
'-l',
'--log',
diff --git a/libchickadee/resolvers/virustotal.py b/libchickadee/resolvers/virustotal.py
index 1c21411..e538089 100644
--- a/libchickadee/resolvers/virustotal.py
+++ b/libchickadee/resolvers/virustotal.py
@@ -84,7 +84,7 @@ from . import ResolverBase
logger = logging.getLogger(__name__)
__author__ = 'Chapin Bryce'
-__date__ = 20200302
+__date__ = 20200805
__license__ = 'MIT Copyright 2020 Chapin Bryce'
__desc__ = 'Resolver for VirusTotal'
@@ -165,6 +165,7 @@ class ProResolver(ResolverBase):
'ip': self.data
}
+ self.last_request = datetime.now()
rdata = requests.get(
self.uri, params=params
)
diff --git a/setup.py b/setup.py
index d4d126a..67ae1ca 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,6 @@
"""Installer for chickadee"""
import setuptools
-from libchickadee import __version__
+from libchickadee import __version__, __desc__, __author__
with open('README.md') as fh:
long_description = fh.read()
@@ -8,8 +8,8 @@ with open('README.md') as fh:
setuptools.setup(
name='chickadee',
version=__version__,
- description='Yet another GeoIP resolution tool.',
- author='Chapin Bryce',
+ description=__desc__,
+ author=__author__,
author_email='[email protected]',
url='https://github.com/chapinb/chickadee',
long_description=long_description,
|
chapinb/chickadee
|
fa9862c3ff48e6ad3e07b2ecaad74ff922eaf926
|
diff --git a/libchickadee/test/test_resolver_virustotal.py b/libchickadee/test/test_resolver_virustotal.py
index dfb54a1..c0dd7d4 100644
--- a/libchickadee/test/test_resolver_virustotal.py
+++ b/libchickadee/test/test_resolver_virustotal.py
@@ -1,5 +1,6 @@
"""VirusTotal Resolver Tests."""
import datetime
+import time
import unittest
import json
import os
@@ -8,7 +9,7 @@ from unittest.mock import patch, MagicMock
from libchickadee.resolvers.virustotal import ProResolver
__author__ = 'Chapin Bryce'
-__date__ = 20200114
+__date__ = 20200805
__license__ = 'MIT Copyright 2020 Chapin Bryce'
__desc__ = '''Yet another GeoIP resolution tool.'''
@@ -101,6 +102,16 @@ class IPAPITestCase(unittest.TestCase):
self.assertIsNone(actual)
self.assertEqual(mock_log.records[0].message, err_msg)
+ @patch("libchickadee.resolvers.virustotal.requests.get")
+ def test_sleeper(self, mock_requests):
+ initial_time = datetime.datetime.now()
+ self.resolver.last_request = initial_time
+ time.sleep(2)
+ mock_requests.return_value.status_code = 403
+
+ self.resolver.query(data='1.1.1.1')
+ self.assertGreaterEqual(self.resolver.last_request, initial_time + datetime.timedelta(seconds=2))
+
if __name__ == "__main__":
unittest.main()
|
VirusTotal rate limiter does not store last request time
**Describe the bug**
The sleeper function checks the last request time to determine how long to sleep for. This value is only set at initialization and does not update per-request.
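A minimal sketch of the intended behaviour, with the timestamp refreshed on every request rather than only at initialization (class name and interval are illustrative):
```python
import datetime
import time

class RateLimitedClient:
    min_interval = datetime.timedelta(seconds=15)  # e.g. 4 requests/minute

    def __init__(self):
        self.last_request = datetime.datetime.now()

    def sleeper(self):
        elapsed = datetime.datetime.now() - self.last_request
        remaining = (self.min_interval - elapsed).total_seconds()
        if remaining > 0:
            time.sleep(remaining)

    def query(self, data):
        self.sleeper()
        # Refresh the timestamp on every request, not just at init,
        # so the next call waits relative to *this* request.
        self.last_request = datetime.datetime.now()
        # ... perform the actual HTTP request here ...
```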
**Version (please complete the following information):**
- OS: Any (macOS)
- Version: 20200802.0
- Python version: 3.7.7
**Additional context**
|
0.0
|
fa9862c3ff48e6ad3e07b2ecaad74ff922eaf926
|
[
"libchickadee/test/test_resolver_virustotal.py::IPAPITestCase::test_sleeper"
] |
[
"libchickadee/test/test_resolver_virustotal.py::IPAPITestCase::test_parse_vt_resp",
"libchickadee/test/test_resolver_virustotal.py::IPAPITestCase::test_parse_vt_resp_2",
"libchickadee/test/test_resolver_virustotal.py::IPAPITestCase::test_resolve_batch",
"libchickadee/test/test_resolver_virustotal.py::IPAPITestCase::test_resolve_errors",
"libchickadee/test/test_resolver_virustotal.py::IPAPITestCase::test_resolve_single"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-05 10:20:06+00:00
|
mit
| 1,557
|
|
odufrn__odufrn-downloader-95
|
diff --git a/.gitignore b/.gitignore
index 9593d84..ef43f41 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
Pipfile
Pipfile.lock
+env/
.vscode/*
.idea/*
venv/*
diff --git a/odufrn_downloader/exceptions.py b/odufrn_downloader/exceptions.py
new file mode 100644
index 0000000..e69e97b
--- /dev/null
+++ b/odufrn_downloader/exceptions.py
@@ -0,0 +1,10 @@
+class odufrException(Exception):
+ def __init__(self):
+ default_message = 'Default Exception!'
+ super().__init__()
+
+
+class odufrIOError(odufrException):
+ def __init__(self):
+ default_message = 'odufrIOError Exception!'
+ super().__init__()
diff --git a/odufrn_downloader/modules/File.py b/odufrn_downloader/modules/File.py
index 7eac2f4..1cdeec2 100644
--- a/odufrn_downloader/modules/File.py
+++ b/odufrn_downloader/modules/File.py
@@ -1,5 +1,6 @@
import os
from .Package import Package
+from odufrn_downloader.exceptions import odufrIOError
class File(Package):
@@ -34,5 +35,5 @@ class File(Package):
self.download_package(
packageName.rstrip(), path, dictionary, years
)
- except IOError as ex:
- self._print_exception(ex)
+ except IOError:
+ raise odufrIOError()
|
odufrn/odufrn-downloader
|
7ab1d9afb9f93ba620ee540d8e691c6ce3558271
|
diff --git a/tests/test_file.py b/tests/test_file.py
index bb21fb0..4bd5fa3 100644
--- a/tests/test_file.py
+++ b/tests/test_file.py
@@ -1,5 +1,6 @@
from .utils import *
import tempfile
+from odufrn_downloader.modules.File import odufrIOError
class Group(unittest.TestCase):
@@ -25,8 +26,6 @@ class Group(unittest.TestCase):
def test_can_print_exception_download_packages_from_file(self):
"""Verifica se dado um arquivo com nomes errados de pacotes
lança-se exceção."""
- assert_console(
- lambda: self.ufrn_data.download_from_file(
- 'potato', './tmp'
- )
- )
+
+ with self.assertRaises(odufrIOError):
+ self.ufrn_data.download_from_file('potato', './tmp')
|
Create exception classes
# Feature
Today we handle exceptions in a very generic way, but we could create exception classes to help us deal with errors. This also helps a lot in the tests, making them more assertive, since unittest has the `assertRaises()` function.
> #### Join Hacktoberfest!
> Contribute to an issue with the `hacktoberfest` label and open a pull request during the month of October to win GitHub swag! To sign up, go to https://hacktoberfest.digitalocean.com/register
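A minimal sketch of what dedicated exception classes enable in tests (class and function names are hypothetical, loosely following the patch above):
```python
import unittest

class OdufrnError(Exception):
    """Base exception for the downloader."""

class OdufrnIOError(OdufrnError):
    """Raised when a package list file cannot be read."""

def download_from_file(path: str) -> None:
    try:
        with open(path) as handle:
            handle.read()
    except IOError as exc:
        raise OdufrnIOError() from exc

class DownloadTest(unittest.TestCase):
    def test_missing_file_raises_specific_error(self):
        # assertRaises lets the test assert the *type* of failure,
        # instead of scraping console output.
        with self.assertRaises(OdufrnIOError):
            download_from_file("potato")

if __name__ == "__main__":
    unittest.main()
```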
|
0.0
|
7ab1d9afb9f93ba620ee540d8e691c6ce3558271
|
[
"tests/test_file.py::Group::test_can_download_packages_from_file",
"tests/test_file.py::Group::test_can_print_exception_download_packages_from_file"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-10-05 22:30:32+00:00
|
mit
| 4,331
|
|
paris-saclay-cds__specio-44
|
diff --git a/specio/core/functions.py b/specio/core/functions.py
index c47f79d..8185e05 100644
--- a/specio/core/functions.py
+++ b/specio/core/functions.py
@@ -144,7 +144,7 @@ def _validate_filenames(uri):
return sorted(glob.glob(os.path.expanduser(uri)))
-def _zip_spectrum(spectrum):
+def _zip_spectrum(spectrum, tol_wavelength):
"""Compress if possible several Spectrum into a single one.
Parameters
@@ -152,6 +152,10 @@ def _zip_spectrum(spectrum):
spectrum : list of Spectrum
The list of Spectrum to zip.
+ tol_wavelength : float
+ Tolerance to merge spectrum when their wavelength are slightly
+ different.
+
Returns
-------
zipped_spectrum : Spectrum or list of Spectrum
@@ -166,7 +170,8 @@ def _zip_spectrum(spectrum):
wavelength = spectrum[0].wavelength
try:
consistent_wavelength = [np.allclose(sp.wavelength,
- wavelength)
+ wavelength,
+ atol=tol_wavelength)
for sp in spectrum]
if not all(consistent_wavelength):
return spectrum
@@ -194,7 +199,7 @@ def _zip_spectrum(spectrum):
return output_spectrum
-def specread(uri, format=None, **kwargs):
+def specread(uri, format=None, tol_wavelength=1e-5, **kwargs):
"""Read spectra in a given format.
Reads spectrum from the specified file. Returns a list or a
@@ -215,6 +220,10 @@ def specread(uri, format=None, **kwargs):
The format to use to read the file. By default specio selects
the appropriate for you based on the filename and its contents.
+ tol_wavelength : float, optional
+ Tolerance to merge spectrum when their wavelength are slightly
+ different.
+
kwargs : dict
Further keyword arguments are passed to the reader. See :func:`.help`
to see what arguments are available for a particular format.
@@ -241,7 +250,7 @@ def specread(uri, format=None, **kwargs):
spectrum = _get_reader_get_data(uri, format, **kwargs)
if isinstance(spectrum, list):
- spectrum = _zip_spectrum(spectrum)
+ spectrum = _zip_spectrum(spectrum, tol_wavelength)
return spectrum
|
paris-saclay-cds/specio
|
e966bc2b7f0955631517780272b8ebd62f6c6a1b
|
diff --git a/specio/core/tests/test_functions.py b/specio/core/tests/test_functions.py
index 0cfd489..049cdbb 100644
--- a/specio/core/tests/test_functions.py
+++ b/specio/core/tests/test_functions.py
@@ -87,14 +87,25 @@ def _generate_list_spectrum(*args):
for _ in range(n_spectrum)]
+def _generate_list_spectrum_close_wavelength(*args):
+ n_wavelength = 5
+ tol = 1e-3
+ wavelength = np.arange(5) + np.random.uniform(low=-tol, high=tol)
+ return Spectrum(np.random.random(n_wavelength),
+ wavelength,
+ None)
+
+
@pytest.mark.parametrize(
- "side_effect,spectra_type,spectra_shape",
- [(_generate_spectrum_identical_wavelength, Spectrum, (10, 5)),
- (_generate_spectrum_different_wavelength_size, list, 10),
- (_generate_spectrum_different_wavelength, list, 10),
- (_generate_list_spectrum, list, 30)])
-def test_specread_consitent_wavelength(side_effect, spectra_type,
- spectra_shape, mocker):
+ "side_effect,tol_wavelength,spectra_type,spectra_shape",
+ [(_generate_spectrum_identical_wavelength, 1e-5, Spectrum, (10, 5)),
+ (_generate_spectrum_different_wavelength_size, 1e-5, list, 10),
+ (_generate_spectrum_different_wavelength, 1e-5, list, 10),
+ (_generate_list_spectrum, 1e-5, list, 30),
+ (_generate_list_spectrum_close_wavelength, 1e-2, Spectrum, (10, 5)),
+ (_generate_list_spectrum_close_wavelength, 1e-5, list, 10)])
+def test_specread_consitent_wavelength(side_effect, tol_wavelength,
+ spectra_type, spectra_shape, mocker):
# emulate that we read several file
mocker.patch('specio.core.functions._validate_filenames',
return_value=['filename' for _ in range(10)])
@@ -103,7 +114,7 @@ def test_specread_consitent_wavelength(side_effect, spectra_type,
side_effect=side_effect)
# emulate the spectrum reading
- spectra = specread('')
+ spectra = specread('', tol_wavelength=tol_wavelength)
assert isinstance(spectra, spectra_type)
if isinstance(spectra, Spectrum):
assert spectra.amplitudes.shape == spectra_shape
|
Add an argument to read_csv to merge wavelength
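A minimal sketch of the tolerance idea, with a hypothetical helper name (`can_merge`); the patch above implements it by passing `atol=tol_wavelength` to `np.allclose` when comparing wavelength grids:

```python
import numpy as np

def can_merge(wavelengths, tol_wavelength=1e-5):
    """Return True if every wavelength grid matches the first one within atol."""
    reference = wavelengths[0]
    return all(np.allclose(w, reference, atol=tol_wavelength)
               for w in wavelengths[1:])

# Grids that differ by about 1e-3 only merge with a loose enough tolerance.
grids = [np.arange(5) + np.random.uniform(-1e-3, 1e-3) for _ in range(3)]
print(can_merge(grids, tol_wavelength=1e-5))  # almost certainly False
print(can_merge(grids, tol_wavelength=1e-2))  # True
```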
|
0.0
|
e966bc2b7f0955631517780272b8ebd62f6c6a1b
|
[
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_identical_wavelength-1e-05-Spectrum-spectra_shape0]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_different_wavelength_size-1e-05-list-10]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_spectrum_different_wavelength-1e-05-list-10]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum-1e-05-list-30]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum_close_wavelength-0.01-Spectrum-spectra_shape4]",
"specio/core/tests/test_functions.py::test_specread_consitent_wavelength[_generate_list_spectrum_close_wavelength-1e-05-list-10]"
] |
[
"specio/core/tests/test_functions.py::test_help",
"specio/core/tests/test_functions.py::test_get_reader",
"specio/core/tests/test_functions.py::test_specread_single_file"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-02-01 18:25:43+00:00
|
bsd-3-clause
| 4,458
|
|
bryanyang0528__ksql-python-73
|
diff --git a/ksql/api.py b/ksql/api.py
index f4a6bbb..b5f1f8f 100644
--- a/ksql/api.py
+++ b/ksql/api.py
@@ -101,7 +101,7 @@ class BaseAPI(object):
headers = {"Accept": "application/json", "Content-Type": "application/json"}
if self.api_key and self.secret:
- base64string = base64.b64encode("{}:{}".format(self.api_key, self.secret))
+ base64string = base64.b64encode(bytes("{}:{}".format(self.api_key, self.secret), "utf-8"))
headers["Authorization"] = "Basic {}" % base64string
req = urllib.request.Request(url=url, data=data, headers=headers, method=method.upper())
|
bryanyang0528/ksql-python
|
dbd864e2f424805a7c3170dbdfe3723fe7aea403
|
diff --git a/tests/test_client.py b/tests/test_client.py
index 42a89db..6e9075f 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -53,6 +53,13 @@ class TestKSQLAPI(unittest.TestCase):
property = [i for i in properties if i["name"] == "ksql.schema.registry.url"][0]
self.assertEqual(property.get("value"), "http://schema-registry:8081")
+ @vcr.use_cassette("tests/vcr_cassettes/ksql_show_table_with_api_key.yml")
+ def test_ksql_show_tables_with_api_key(self):
+ api_client = KSQLAPI(url=self.url, check_version=False, api_key='foo', secret='bar')
+ ksql_string = "show tables;"
+ r = api_client.ksql(ksql_string)
+ self.assertEqual(r, [{"@type": "tables", "statementText": "show tables;", "tables": [], "warnings": []}])
+
@vcr.use_cassette("tests/vcr_cassettes/ksql_show_table.yml")
def test_ksql_show_tables(self):
""" Test GET requests """
diff --git a/tests/unit-tests/test_api.py b/tests/unit-tests/test_api.py
index 0bce5d4..98a2c60 100644
--- a/tests/unit-tests/test_api.py
+++ b/tests/unit-tests/test_api.py
@@ -1,5 +1,6 @@
import unittest
import responses
+import urllib
from ksql.api import BaseAPI
diff --git a/tests/vcr_cassettes/ksql_show_table_with_api_key.yml b/tests/vcr_cassettes/ksql_show_table_with_api_key.yml
new file mode 100644
index 0000000..df994fc
--- /dev/null
+++ b/tests/vcr_cassettes/ksql_show_table_with_api_key.yml
@@ -0,0 +1,34 @@
+interactions:
+- request:
+ body: '{"ksql": "show tables;"}'
+ headers:
+ Accept:
+ - application/json
+ Authorization:
+ - Basic {}
+ Connection:
+ - close
+ Content-Length:
+ - '24'
+ Content-Type:
+ - application/json
+ Host:
+ - localhost:8088
+ User-Agent:
+ - Python-urllib/3.8
+ method: POST
+ uri: http://localhost:8088/ksql
+ response:
+ body:
+ string: '[{"@type":"tables","statementText":"show tables;","tables":[],"warnings":[]}]'
+ headers:
+ connection:
+ - close
+ content-length:
+ - '77'
+ content-type:
+ - application/json
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/vcr_cassettes_backup/bad_requests.yml b/tests/vcr_cassettes_backup/bad_requests.yml
deleted file mode 100644
index 024ac7a..0000000
--- a/tests/vcr_cassettes_backup/bad_requests.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "noi;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['16']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"generic_error","error_code":40000,"message":"line 1:1:
- mismatched input ''noi'' expecting {<EOF>, ''('', ''SELECT'', ''VALUES'',
- ''CREATE'', ''REGISTER'', ''TABLE'', ''INSERT'', ''DESCRIBE'', ''PRINT'',
- ''EXPLAIN'', ''SHOW'', ''LIST'', ''TERMINATE'', ''LOAD'', ''DROP'', ''SET'',
- ''EXPORT'', ''UNSET'', ''RUN''}\nCaused by: org.antlr.v4.runtime.InputMismatchException","stack_trace":["io.confluent.ksql.parser.KsqlParser.buildAst(KsqlParser.java:66)","io.confluent.ksql.KsqlEngine.getStatements(KsqlEngine.java:497)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:171)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 21:19:08 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/get_ksql_server.yml b/tests/vcr_cassettes_backup/get_ksql_server.yml
deleted file mode 100644
index e0a3c83..0000000
--- a/tests/vcr_cassettes_backup/get_ksql_server.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-interactions:
-- request:
- body: null
- headers:
- Accept: ['*/*']
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- User-Agent: [python-requests/2.19.1]
- method: GET
- uri: http://localhost:8088/info
- response:
- body: {string: '{"KsqlServerInfo":{"version":"5.0.0-SNAPSHOT","kafkaClusterId":"9HvFRIoMSyy1YUxjpOt-gg","ksqlServiceId":"default_"}}'}
- headers:
- Content-Type: [application/vnd.ksql.v1+json]
- Date: ['Fri, 20 Jul 2018 20:08:04 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/get_properties.yml b/tests/vcr_cassettes_backup/get_properties.yml
deleted file mode 100644
index 0ffd904..0000000
--- a/tests/vcr_cassettes_backup/get_properties.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "show properties;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['28']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"properties","statementText":"show properties;","properties":{"ksql.extension.dir":"ext","ksql.streams.cache.max.bytes.buffering":"10000000","ksql.transient.prefix":"transient_","ksql.schema.registry.url":"http://localhost:8081","ssl.secure.random.implementation":null,"ksql.streams.default.deserialization.exception.handler":"io.confluent.ksql.errors.LogMetricAndContinueExceptionHandler","ksql.output.topic.name.prefix":"","ksql.streams.auto.offset.reset":"latest","ksql.sink.partitions":"4","ssl.keystore.type":"JKS","ssl.trustmanager.algorithm":"PKIX","ksql.statestore.suffix":"_ksql_statestore","ssl.key.password":null,"ksql.service.id":"default_","ssl.truststore.password":null,"ssl.endpoint.identification.algorithm":"https","ksql.streams.bootstrap.servers":"localhost:29092","ssl.protocol":"TLS","ksql.streams.commit.interval.ms":"2000","ksql.sink.replicas":"1","ssl.provider":null,"ssl.enabled.protocols":"TLSv1.2,TLSv1.1,TLSv1","ssl.keystore.location":null,"ksql.streams.num.stream.threads":"4","ssl.cipher.suites":null,"ssl.truststore.type":"JKS","ksql.udfs.enabled":"true","ssl.truststore.location":null,"ksql.udf.enable.security.manager":"true","ssl.keystore.password":null,"ssl.keymanager.algorithm":"SunX509","ksql.streams.application.id":"KSQL_REST_SERVER_DEFAULT_APP_ID","ksql.sink.window.change.log.additional.retention":"1000000","ksql.udf.collect.metrics":"false","ksql.persistent.prefix":"query_"},"overwrittenProperties":[]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:06 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/healthcheck.yml b/tests/vcr_cassettes_backup/healthcheck.yml
deleted file mode 100644
index 95b7873..0000000
--- a/tests/vcr_cassettes_backup/healthcheck.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-interactions:
-- request:
- body: null
- headers:
- Accept: ['*/*']
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- User-Agent: [python-requests/2.19.1]
- method: GET
- uri: http://localhost:8088/status
- response:
- body: {string: '{"commandStatuses":{"stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/create":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/create":"SUCCESS","stream/TEST_TABLE/drop":"SUCCESS","stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/drop":"SUCCESS","stream/CREATE_STREAM_AS_WITHOUT_CONDITIONS/create":"SUCCESS","stream/PAGEVIEWS_ORIGINAL/drop":"SUCCESS","stream/KSQL_PYTHON_TESTTEST_KSQL_CREATE_STREAM/create":"SUCCESS","stream/TEST_TABLE/create":"SUCCESS","stream/PREBID_TRAFFIC_LOG_VALID_STREAM/drop":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH/create":"SUCCESS","stream/PAGEVIEWS_ORIGINAL/create":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND/create":"SUCCESS","stream/PREBID_TRAFFIC_LOG_VALID_STREAM/create":"ERROR","stream/KSQL_PYTHON_TESTTEST_KSQL_CREATE_STREAM/drop":"SUCCESS","stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/drop":"QUEUED"}}'}
- headers:
- Content-Type: [application/vnd.ksql.v1+json]
- Date: ['Fri, 20 Jul 2018 20:10:10 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream.yml b/tests/vcr_cassettes_backup/ksql_create_stream.yml
deleted file mode 100644
index e40a8cc..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_testtest_ksql_create_stream (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['198']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_testtest_ksql_create_stream
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TESTTEST_KSQL_CREATE_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:09 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith.yml
deleted file mode 100644
index 642cc7b..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith.yml
+++ /dev/null
@@ -1,132 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED pageviews_original;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['49']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''PAGEVIEWS_ORIGINAL'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED pageviews_original;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:41 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n pageviews_original;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['63']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n pageviews_original;","commandId":"stream/PAGEVIEWS_ORIGINAL/drop","commandStatus":{"status":"SUCCESS","message":"Source
- PAGEVIEWS_ORIGINAL does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:41 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED create_stream_as_with_conditions_with_startwith;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH''
- in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED create_stream_as_with_conditions_with_startwith;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:42 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n create_stream_as_with_conditions_with_startwith;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['92']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n create_stream_as_with_conditions_with_startwith;","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/drop","commandStatus":{"status":"SUCCESS","message":"Source
- CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:42 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream pageviews_original
- (name string, age bigint, userid string, pageid bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/PAGEVIEWS_ORIGINAL/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:42 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_with_conditions_with_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original
- where userid = ''foo_%'';"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['269']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_with_conditions_with_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original
- where userid = ''foo_%'';","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:43 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith_with_and.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith_with_and.yml
deleted file mode 100644
index 62d4a1d..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_with_startwith_with_and.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream pageviews_original (name string, age bigint, userid string, pageid
- bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:50 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_with_conditions_with_startwith_with_and
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith_with_and'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original where userid = ''foo_%'' and age > 10;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['300']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_with_conditions_with_startwith_with_and
- WITH (kafka_topic=''create_stream_as_with_conditions_with_startwith_with_and'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original where userid = ''foo_%'' and age > 10;","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:50 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_without_startwith.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_without_startwith.yml
deleted file mode 100644
index 96aa9fb..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_conditions_without_startwith.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream pageviews_original (name string, age bigint, userid string, pageid
- bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:56 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_with_conditions_without_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_without_startwith'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original
- where userid = ''foo'';"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['273']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_with_conditions_without_startwith
- WITH (kafka_topic=''create_stream_as_with_conditions_without_startwith'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original where userid = ''foo'';","commandId":"stream/CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:07:56 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_wrong_timestamp.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_with_wrong_timestamp.yml
deleted file mode 100644
index c18ad24..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_with_wrong_timestamp.yml
+++ /dev/null
@@ -1,145 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED prebid_traffic_log_total_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['62']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''PREBID_TRAFFIC_LOG_TOTAL_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED prebid_traffic_log_total_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:10 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n prebid_traffic_log_total_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['76']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n prebid_traffic_log_total_stream;","commandId":"stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- PREBID_TRAFFIC_LOG_TOTAL_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED prebid_traffic_log_valid_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['62']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''PREBID_TRAFFIC_LOG_VALID_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED prebid_traffic_log_valid_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n prebid_traffic_log_valid_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['76']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n prebid_traffic_log_valid_stream;","commandId":"stream/PREBID_TRAFFIC_LOG_VALID_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- PREBID_TRAFFIC_LOG_VALID_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream prebid_traffic_log_total_stream (name string, age
- bigint, userid string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['173']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream prebid_traffic_log_total_stream
- (name string, age bigint, userid string, pageid bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/PREBID_TRAFFIC_LOG_TOTAL_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream prebid_traffic_log_valid_stream WITH (kafka_topic=''prebid_traffic_log_valid_topic'',
- value_format=''DELIMITED'', timestamp=''foo'') AS SELECT * FROM prebid_traffic_log_total_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['202']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream prebid_traffic_log_valid_stream
- WITH (kafka_topic=''prebid_traffic_log_valid_topic'', value_format=''DELIMITED'',
- timestamp=''foo'') AS SELECT * FROM prebid_traffic_log_total_stream;","commandId":"stream/PREBID_TRAFFIC_LOG_VALID_STREAM/create","commandStatus":{"status":"ERROR","message":"io.confluent.ksql.util.KsqlException:
- No column with the provided timestamp column name in the WITH clause, FOO,
- exists in the defined schema.\n\tat io.confluent.ksql.util.timestamp.TimestampExtractionPolicyFactory.lambda$create$0(TimestampExtractionPolicyFactory.java:41)\n\tat
- java.util.Optional.orElseThrow(Optional.java:290)\n\tat io.confluent.ksql.util.timestamp.TimestampExtractionPolicyFactory.create(TimestampExtractionPolicyFactory.java:41)\n\tat
- io.confluent.ksql.planner.LogicalPlanner.getTimestampExtractionPolicy(LogicalPlanner.java:126)\n\tat
- io.confluent.ksql.planner.LogicalPlanner.buildOutputNode(LogicalPlanner.java:93)\n\tat
- io.confluent.ksql.planner.LogicalPlanner.buildPlan(LogicalPlanner.java:83)\n\tat
- io.confluent.ksql.QueryEngine.buildQueryLogicalPlan(QueryEngine.java:118)\n\tat
- io.confluent.ksql.QueryEngine.buildLogicalPlans(QueryEngine.java:90)\n\tat
- io.confluent.ksql.KsqlEngine.planQueries(KsqlEngine.java:221)\n\tat io.confluent.ksql.KsqlEngine.buildMultipleQueries(KsqlEngine.java:211)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.startQuery(StatementExecutor.java:372)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.handleCreateAsSelect(StatementExecutor.java:317)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.executeStatement(StatementExecutor.java:234)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.handleStatementWithTerminatedQueries(StatementExecutor.java:206)\n\tat
- io.confluent.ksql.rest.server.computation.StatementExecutor.handleStatement(StatementExecutor.java:112)\n\tat
- io.confluent.ksql.rest.server.computation.CommandRunner.executeStatement(CommandRunner.java:105)\n\tat
- io.confluent.ksql.rest.server.computation.CommandRunner.fetchAndRunCommands(CommandRunner.java:88)\n\tat
- io.confluent.ksql.rest.server.computation.CommandRunner.run(CommandRunner.java:63)\n\tat
- java.lang.Thread.run(Thread.java:748)\n"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:12 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_as_without_conditions.yml b/tests/vcr_cassettes_backup/ksql_create_stream_as_without_conditions.yml
deleted file mode 100644
index 1fb4cde..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_as_without_conditions.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream pageviews_original (name string, age bigint, userid
- string, pageid bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['160']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream pageviews_original (name string, age bigint, userid string, pageid
- bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:00 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "CREATE stream create_stream_as_without_conditions WITH (kafka_topic=''create_stream_as_without_conditions'',
- value_format=''DELIMITED'', timestamp=''logtime'') AS SELECT rowtime as logtime,
- * FROM pageviews_original;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['222']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream create_stream_as_without_conditions
- WITH (kafka_topic=''create_stream_as_without_conditions'', value_format=''DELIMITED'',
- timestamp=''logtime'') AS SELECT rowtime as logtime, * FROM pageviews_original;","commandId":"stream/CREATE_STREAM_AS_WITHOUT_CONDITIONS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:01 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder.yml b/tests/vcr_cassettes_backup/ksql_create_stream_by_builder.yml
deleted file mode 100644
index d78aae9..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder.yml
+++ /dev/null
@@ -1,65 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['41']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''TEST_TABLE'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED test_table;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['55']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n test_table;","commandId":"stream/TEST_TABLE/drop","commandStatus":{"status":"SUCCESS","message":"Source
- TEST_TABLE does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream test_table (viewtime bigint, userid varchar, pageid
- varchar) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['146']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream test_table
- (viewtime bigint, userid varchar, pageid varchar) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/TEST_TABLE/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder_api.yml b/tests/vcr_cassettes_backup/ksql_create_stream_by_builder_api.yml
deleted file mode 100644
index 647f201..0000000
--- a/tests/vcr_cassettes_backup/ksql_create_stream_by_builder_api.yml
+++ /dev/null
@@ -1,63 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['41']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- test_table;","sourceDescription":{"name":"TEST_TABLE","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:16 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n test_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['55']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n test_table;","commandId":"stream/TEST_TABLE/drop","commandStatus":{"status":"SUCCESS","message":"Source
- TEST_TABLE was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:16 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream test_table (viewtime bigint, userid varchar, pageid
- varchar) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['146']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream test_table
- (viewtime bigint, userid varchar, pageid varchar) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","commandId":"stream/TEST_TABLE/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:08:16 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_show_table.yml b/tests/vcr_cassettes_backup/ksql_show_table.yml
deleted file mode 100644
index 0d2d6fb..0000000
--- a/tests/vcr_cassettes_backup/ksql_show_table.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "show tables;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['24']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"tables","statementText":"show tables;","tables":[]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:11 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/ksql_topic_already_registered.yml b/tests/vcr_cassettes_backup/ksql_topic_already_registered.yml
deleted file mode 100644
index 6ecb9fe..0000000
--- a/tests/vcr_cassettes_backup/ksql_topic_already_registered.yml
+++ /dev/null
@@ -1,87 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED foo_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['40']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''FOO_TABLE'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED foo_table;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:14 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n foo_table;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['54']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n foo_table;","commandId":"stream/FOO_TABLE/drop","commandStatus":{"status":"SUCCESS","message":"Source
- FOO_TABLE does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:14 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream foo_table (name string, age bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['121']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE stream foo_table
- (name string, age bigint) WITH (kafka_topic=''exist_topic'', value_format=''DELIMITED'');","commandId":"stream/FOO_TABLE/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:14 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE stream foo_table (name string, age bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['121']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Topic
- already registered.","stack_trace":["io.confluent.ksql.ddl.commands.RegisterTopicCommand.run(RegisterTopicCommand.java:99)","io.confluent.ksql.ddl.commands.CreateStreamCommand.run(CreateStreamCommand.java:43)","io.confluent.ksql.ddl.commands.DdlCommandExec.executeOnMetaStore(DdlCommandExec.java:61)","io.confluent.ksql.ddl.commands.DdlCommandExec.execute(DdlCommandExec.java:54)","io.confluent.ksql.rest.server.resources.KsqlResource.executeDdlCommand(KsqlResource.java:783)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:716)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream foo_table (name string, age bigint) WITH (kafka_topic=''exist_topic'',
- value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:15 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/raise_create_error_no_topic.yml b/tests/vcr_cassettes_backup/raise_create_error_no_topic.yml
deleted file mode 100644
index 3cbc375..0000000
--- a/tests/vcr_cassettes_backup/raise_create_error_no_topic.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "CREATE stream foo_table (name string, age bigint) WITH (kafka_topic=''this_topic_is_not_exist'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['133']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Kafka
- topic does not exist: this_topic_is_not_exist","stack_trace":["io.confluent.ksql.ddl.commands.AbstractCreateStreamCommand.registerTopicFirst(AbstractCreateStreamCommand.java:184)","io.confluent.ksql.ddl.commands.AbstractCreateStreamCommand.<init>(AbstractCreateStreamCommand.java:81)","io.confluent.ksql.ddl.commands.CreateStreamCommand.<init>(CreateStreamCommand.java:34)","io.confluent.ksql.rest.server.resources.KsqlResource.lambda$registerDdlCommandTasks$20(KsqlResource.java:713)","io.confluent.ksql.rest.server.resources.KsqlResource.getStatementExecutionPlan(KsqlResource.java:635)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:258)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"CREATE
- stream foo_table (name string, age bigint) WITH (kafka_topic=''this_topic_is_not_exist'',
- value_format=''DELIMITED'');","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:13 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_drop_all_streams.yml b/tests/vcr_cassettes_backup/utils_test_drop_all_streams.yml
deleted file mode 100644
index e3dd6b7..0000000
--- a/tests/vcr_cassettes_backup/utils_test_drop_all_streams.yml
+++ /dev/null
@@ -1,144 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['69']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_all_streams;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:17 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['83']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_all_streams;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:17 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_all_streams (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['215']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_all_streams
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:17 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "SHOW STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['25']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"streams","statementText":"SHOW STREAMS;","streams":[{"type":"STREAM","name":"TEST_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH","topic":"create_stream_as_with_conditions_without_startwith","format":"DELIMITED"},{"type":"STREAM","name":"PREBID_TRAFFIC_LOG_TOTAL_STREAM","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"PAGEVIEWS_ORIGINAL","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITHOUT_CONDITIONS","topic":"create_stream_as_without_conditions","format":"DELIMITED"},{"type":"STREAM","name":"KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS","topic":"ksql_python_test_exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"FOO_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND","topic":"create_stream_as_with_conditions_with_startwith_with_and","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH","topic":"create_stream_as_with_conditions_with_startwith","format":"DELIMITED"}]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['69']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['83']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['69']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_ALL_STREAMS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_all_streams;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_drop_stream.yml b/tests/vcr_cassettes_backup/utils_test_drop_stream.yml
deleted file mode 100644
index b329d22..0000000
--- a/tests/vcr_cassettes_backup/utils_test_drop_stream.yml
+++ /dev/null
@@ -1,145 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:18 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_stream (viewtime bigint,
- userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['209']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_stream
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:19 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_drop_stream_create_as_stream.yml b/tests/vcr_cassettes_backup/utils_test_drop_stream_create_as_stream.yml
deleted file mode 100644
index b8c8de2..0000000
--- a/tests/vcr_cassettes_backup/utils_test_drop_stream_create_as_stream.yml
+++ /dev/null
@@ -1,187 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['64']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['78']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_stream (viewtime bigint,
- userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['209']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_stream
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_drop_stream_as as select
- * from ksql_python_test_test_drop_stream;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['114']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_drop_stream_as
- as select * from ksql_python_test_test_drop_stream;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:21 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['67']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream_as;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","readQueries":[],"writeQueries":[{"sinks":["KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS"],"id":"CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4","queryString":"CREATE
- STREAM ksql_python_test_test_drop_stream_as as select * from ksql_python_test_test_drop_stream;"}],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","partitions":4,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:23 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['67']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_drop_stream_as;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","readQueries":[],"writeQueries":[{"sinks":["KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS"],"id":"CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4","queryString":"CREATE
- STREAM ksql_python_test_test_drop_stream_as as select * from ksql_python_test_test_drop_stream;"}],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS","partitions":4,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:24 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "TERMINATE CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['66']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"TERMINATE CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4;","commandId":"terminate/CSAS_KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS_4/execute","commandStatus":{"status":"QUEUED","message":"Statement
- written to command topic"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:24 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['81']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_drop_stream_as;","commandId":"stream/KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS was dropped. "}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:29 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_drop_stream_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['67']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_DROP_STREAM_AS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_drop_stream_as;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:30 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_get_all_streams.yml b/tests/vcr_cassettes_backup/utils_test_get_all_streams.yml
deleted file mode 100644
index 3dfbca3..0000000
--- a/tests/vcr_cassettes_backup/utils_test_get_all_streams.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['68']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_get_all_streams;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:31 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_get_all_streams;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['82']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_get_all_streams;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:31 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_all_streams (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['214']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_all_streams
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:31 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "SHOW STREAMS;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['25']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"streams","statementText":"SHOW STREAMS;","streams":[{"type":"STREAM","name":"TEST_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITHOUT_STARTWITH","topic":"create_stream_as_with_conditions_without_startwith","format":"DELIMITED"},{"type":"STREAM","name":"PREBID_TRAFFIC_LOG_TOTAL_STREAM","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"KSQL_PYTHON_TEST_TEST_GET_ALL_STREAMS","topic":"ksql_python_test_exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"PAGEVIEWS_ORIGINAL","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITHOUT_CONDITIONS","topic":"create_stream_as_without_conditions","format":"DELIMITED"},{"type":"STREAM","name":"FOO_TABLE","topic":"exist_topic","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH_WITH_AND","topic":"create_stream_as_with_conditions_with_startwith_with_and","format":"DELIMITED"},{"type":"STREAM","name":"CREATE_STREAM_AS_WITH_CONDITIONS_WITH_STARTWITH","topic":"create_stream_as_with_conditions_with_startwith","format":"DELIMITED"}]}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:32 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_get_dependent_queries.yml b/tests/vcr_cassettes_backup/utils_test_get_dependent_queries.yml
deleted file mode 100644
index 46a1d57..0000000
--- a/tests/vcr_cassettes_backup/utils_test_get_dependent_queries.yml
+++ /dev/null
@@ -1,108 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_dependent_queries;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['74']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES'' in the
- Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_get_dependent_queries;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_get_dependent_queries;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['88']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_get_dependent_queries;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_dependent_queries (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['219']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_dependent_queries
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_dependent_queries_as
- as select * from ksql_python_test_test_get_dependent_queries;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['134']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_dependent_queries_as
- as select * from ksql_python_test_test_get_dependent_queries;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created and running"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:33 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_dependent_queries_as;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['77']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_get_dependent_queries_as;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS","readQueries":[],"writeQueries":[{"sinks":["KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS"],"id":"CSAS_KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS_5","queryString":"CREATE
- STREAM ksql_python_test_test_get_dependent_queries_as as select * from ksql_python_test_test_get_dependent_queries;"}],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"KSQL_PYTHON_TEST_TEST_GET_DEPENDENT_QUERIES_AS","partitions":4,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:35 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
diff --git a/tests/vcr_cassettes_backup/utils_test_get_stream_info.yml b/tests/vcr_cassettes_backup/utils_test_get_stream_info.yml
deleted file mode 100644
index a6378fb..0000000
--- a/tests/vcr_cassettes_backup/utils_test_get_stream_info.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-interactions:
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_stream_info;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['68']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '{"@type":"statement_error","error_code":40001,"message":"Could
- not find STREAM/TABLE ''KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO'' in the Metastore","stack_trace":["io.confluent.ksql.rest.server.resources.KsqlResource.describe(KsqlResource.java:457)","io.confluent.ksql.rest.server.resources.KsqlResource.validateStatement(KsqlResource.java:248)","io.confluent.ksql.rest.server.resources.KsqlResource.handleKsqlStatements(KsqlResource.java:190)","sun.reflect.GeneratedMethodAccessor6.invoke(Unknown
- Source)","sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)","java.lang.reflect.Method.invoke(Method.java:498)","org.glassfish.jersey.server.model.internal.ResourceMethodInvocationHandlerFactory.lambda$static$0(ResourceMethodInvocationHandlerFactory.java:76)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher$1.run(AbstractJavaResourceMethodDispatcher.java:148)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.invoke(AbstractJavaResourceMethodDispatcher.java:191)","org.glassfish.jersey.server.model.internal.JavaResourceMethodDispatcherProvider$ResponseOutInvoker.doDispatch(JavaResourceMethodDispatcherProvider.java:200)","org.glassfish.jersey.server.model.internal.AbstractJavaResourceMethodDispatcher.dispatch(AbstractJavaResourceMethodDispatcher.java:103)","org.glassfish.jersey.server.model.ResourceMethodInvoker.invoke(ResourceMethodInvoker.java:493)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:415)","org.glassfish.jersey.server.model.ResourceMethodInvoker.apply(ResourceMethodInvoker.java:104)","org.glassfish.jersey.server.ServerRuntime$1.run(ServerRuntime.java:277)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:272)","org.glassfish.jersey.internal.Errors$1.call(Errors.java:268)","org.glassfish.jersey.internal.Errors.process(Errors.java:316)","org.glassfish.jersey.internal.Errors.process(Errors.java:298)","org.glassfish.jersey.internal.Errors.process(Errors.java:268)","org.glassfish.jersey.process.internal.RequestScope.runInScope(RequestScope.java:289)","org.glassfish.jersey.server.ServerRuntime.process(ServerRuntime.java:256)","org.glassfish.jersey.server.ApplicationHandler.handle(ApplicationHandler.java:703)","org.glassfish.jersey.servlet.WebComponent.serviceImpl(WebComponent.java:416)","org.glassfish.jersey.servlet.ServletContainer.serviceImpl(ServletContainer.java:409)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:584)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:525)","org.glassfish.jersey.servlet.ServletContainer.doFilter(ServletContainer.java:462)","org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642)","org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)","org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)","org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)","org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)","org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)","org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)","org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)","org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)","org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)","org.eclipse.jetty.server.handler.StatisticsHandler.handle(StatisticsHandler.java:169)","org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)","org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrappe
r.java:132)","org.eclipse.jetty.server.Server.handle(Server.java:531)","org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352)","org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260)","org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281)","org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102)","org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)","org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)","org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)","org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:760)","org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:678)","java.lang.Thread.run(Thread.java:748)"],"statementText":"DESCRIBE
- EXTENDED ksql_python_test_test_get_stream_info;","entities":[]}'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:44 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 400, message: Bad Request}
-- request:
- body: '{"ksql": "DROP\n STREAM IF EXISTS\n ksql_python_test_test_get_stream_info;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['82']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"DROP\n STREAM IF
- EXISTS\n ksql_python_test_test_get_stream_info;","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO/drop","commandStatus":{"status":"SUCCESS","message":"Source
- KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO does not exist."}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:45 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "CREATE STREAM ksql_python_test_test_get_stream_info (viewtime
- bigint, userid varchar, pageid varchar) WITH (kafka_topic=''ksql_python_test_exist_topic'',
- value_format=''DELIMITED'');"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['214']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"currentStatus","statementText":"CREATE STREAM ksql_python_test_test_get_stream_info
- (viewtime bigint, userid varchar, pageid varchar) WITH
- (kafka_topic=''ksql_python_test_exist_topic'', value_format=''DELIMITED'');","commandId":"stream/KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO/create","commandStatus":{"status":"SUCCESS","message":"Stream
- created"}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:45 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-- request:
- body: '{"ksql": "DESCRIBE EXTENDED ksql_python_test_test_get_stream_info;"}'
- headers:
- Accept: [application/json]
- Accept-Encoding: ['gzip, deflate']
- Connection: [keep-alive]
- Content-Length: ['68']
- Content-Type: [application/json]
- User-Agent: [python-requests/2.19.1]
- method: POST
- uri: http://localhost:8088/ksql
- response:
- body: {string: '[{"@type":"sourceDescription","statementText":"DESCRIBE EXTENDED
- ksql_python_test_test_get_stream_info;","sourceDescription":{"name":"KSQL_PYTHON_TEST_TEST_GET_STREAM_INFO","readQueries":[],"writeQueries":[],"fields":[{"name":"ROWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"ROWKEY","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"VIEWTIME","schema":{"type":"BIGINT","fields":null,"memberSchema":null}},{"name":"USERID","schema":{"type":"STRING","fields":null,"memberSchema":null}},{"name":"PAGEID","schema":{"type":"STRING","fields":null,"memberSchema":null}}],"type":"STREAM","key":"","timestamp":"","statistics":"","errorStats":"","extended":true,"format":"DELIMITED","topic":"ksql_python_test_exist_topic","partitions":1,"replication":1}}]'}
- headers:
- Content-Type: [application/json]
- Date: ['Fri, 20 Jul 2018 20:10:45 GMT']
- Server: [Jetty(9.4.10.v20180503)]
- status: {code: 200, message: OK}
-version: 1
|
client.ksql('show tables') returns error 'not all arguments converted during string formatting'
`from ksql import KSQLAPI
api_key = 'ZD74E3GRK4QXWO6W'
api_secret = 'RByQinKf4ZYodiBLuCKybx92SSPrQwEwnA8DOaVfJEhAVf3LQ096yFteZkep4XKx'
ksql_endpoint = 'https://pksqlc-42o7q.us-east-1.aws.confluent.cloud:443'
client = KSQLAPI(ksql_endpoint, api_key=api_key, secret=api_secret)
client.ksql('show tables')`
This code returns:
`not all arguments converted during string formatting`
The offending code is on line 108 of api.py:
` base64string = base64.b64encode('{}:{}' % (self.api_key, self.secret))`
Other calls to the client return the same error, such as
`client.query('select userid from users')`
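
For context, the error comes from mixing `str.format`-style `{}` placeholders with the `%` operator: applying `%` to a string that contains no `%` conversion specifiers raises `TypeError: not all arguments converted during string formatting`. Below is a minimal, illustrative sketch of how such a line could be corrected; it is not necessarily the project's actual fix, and the credential values are placeholders.

```python
import base64

api_key = "my-api-key"        # placeholder, not a real credential
api_secret = "my-api-secret"  # placeholder, not a real credential

# Use str.format() to match the '{}' placeholders, and encode to bytes,
# since base64.b64encode() expects a bytes-like object.
base64string = base64.b64encode(
    "{}:{}".format(api_key, api_secret).encode("utf-8")
).decode("ascii")
print(base64string)
```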
|
0.0
|
dbd864e2f424805a7c3170dbdfe3723fe7aea403
|
[
"tests/test_client.py::TestKSQLAPI::test_ksql_show_tables_with_api_key"
] |
[
"tests/test_client.py::TestKSQLAPI::test_bad_requests",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_with_conditions_with_startwith",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_with_conditions_with_startwith_with_and",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_with_conditions_without_startwith",
"tests/test_client.py::TestKSQLAPI::test_create_stream_as_without_conditions",
"tests/test_client.py::TestKSQLAPI::test_get_ksql_version_success",
"tests/test_client.py::TestKSQLAPI::test_get_properties",
"tests/test_client.py::TestKSQLAPI::test_get_url",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream_as_with_wrong_timestamp",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream_by_builder",
"tests/test_client.py::TestKSQLAPI::test_ksql_create_stream_by_builder_api",
"tests/test_client.py::TestKSQLAPI::test_ksql_server_healthcheck",
"tests/test_client.py::TestKSQLAPI::test_ksql_show_tables",
"tests/test_client.py::TestKSQLAPI::test_ksql_show_tables_with_no_semicolon",
"tests/test_client.py::TestKSQLAPI::test_raise_create_error_no_topic",
"tests/test_client.py::TestKSQLAPI::test_raise_create_error_topic_already_registered",
"tests/test_client.py::TestKSQLAPI::test_with_timeout"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-18 17:34:25+00:00
|
mit
| 1,440
|
|
online-judge-tools__template-generator-56
|
diff --git a/onlinejudge_template/generator/python.py b/onlinejudge_template/generator/python.py
index 88075ef..5fd6303 100644
--- a/onlinejudge_template/generator/python.py
+++ b/onlinejudge_template/generator/python.py
@@ -114,11 +114,11 @@ def _generate_input_dfs(node: FormatNode, *, declared: Set[VarName], initialized
elif type_ == VarType.Float:
return OtherNode(line=f"""{var} = 100.0 * random.random() # TODO: edit here""")
elif type_ == VarType.String:
- return OtherNode(line=f"""{var} = ''.join([random.choice('abcde') for range(random.randint(1, 100))]) # TODO: edit here""")
+ return OtherNode(line=f"""{var} = ''.join([random.choice('abcde') for _ in range(random.randint(1, 100))]) # TODO: edit here""")
elif type_ == VarType.Char:
return OtherNode(line=f"""{var} = random.choice('abcde') # TODO: edit here""")
else:
- return OtherNode(line=f"""{var} = None # TODO: edit here""")
+ return OtherNode(line=f"""{var} = random.randint(1, 10) # TODO: edit here""")
elif isinstance(node, NewlineNode):
return SentencesNode(sentences=[])
elif isinstance(node, SequenceNode):
diff --git a/setup.cfg b/setup.cfg
index ac26f88..ca5af62 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -14,9 +14,9 @@ classifiers =
[options.extras_require]
dev =
- isort == 5.4.1
+ isort == 5.5.2
mypy == 0.782
- pylint == 2.5.3
+ pylint == 2.6.0
yapf == 0.30.0
doc =
sphinx >= 2.4
|
online-judge-tools/template-generator
|
f0230727fb9e67eb20d3f5c537d1747bb9fb90b3
|
diff --git a/tests/command_template.py b/tests/command_template.py
index 28de86f..facea02 100644
--- a/tests/command_template.py
+++ b/tests/command_template.py
@@ -13,6 +13,7 @@ from onlinejudge_template.main import main
class TestOJTemplateCommand(unittest.TestCase):
"""TestOJTemplateCommand is a class for end-to-end tests about oj-template command.
+ The tests actually compile and execute the generated code and check them get AC on sample cases.
"""
def _helper(self, *, url: str, template: str, placeholder: str, code: str, compile: Callable[[pathlib.Path], List[str]], command: Callable[[pathlib.Path], str]):
with tempfile.TemporaryDirectory() as tmpdir_:
@@ -43,8 +44,8 @@ class TestOJTemplateCommand(unittest.TestCase):
y = str(b) * a
return int(min(x, y))
"""), ' ')
- compile = lambda tmpdir: [sys.executable, '--version']
- command = lambda tmpdir: ' '.join([sys.executable, str(tmpdir / 'main.py')])
+ compile = lambda tmpdir: [sys.executable, '--version'] # nop
+ command = lambda tmpdir: ' '.join([sys.executable, str(tmpdir / template)])
self._helper(url=url, template=template, placeholder=placeholder, code=code, compile=compile, command=command)
def test_main_cpp_aplusb(self) -> None:
@@ -54,6 +55,60 @@ class TestOJTemplateCommand(unittest.TestCase):
code = textwrap.indent(textwrap.dedent("""\
return A + B;
"""), ' ')
- compile = lambda tmpdir: ['g++', '-std=c++14', str(tmpdir / 'main.cpp'), '-o', str(tmpdir / 'a.out')]
+ compile = lambda tmpdir: ['g++', '-std=c++14', str(tmpdir / template), '-o', str(tmpdir / 'a.out')]
command = lambda tmpdir: str(tmpdir / 'a.out')
self._helper(url=url, template=template, placeholder=placeholder, code=code, compile=compile, command=command)
+
+
+class TestOJTemplateCommandGenerator(unittest.TestCase):
+ """TestOJTemplateCommandGenerator is a class for end-to-end tests about oj-template command.
+ The tests actually executes the generator and check the result with a validator.
+ """
+ def _helper(self, *, url: str, template: str, compile: Callable[[pathlib.Path], List[str]], command: Callable[[pathlib.Path], List[str]]):
+ with tempfile.TemporaryDirectory() as tmpdir_:
+ tmpdir = pathlib.Path(tmpdir_)
+ source_file = tmpdir / template
+
+ # generate
+ with open(source_file, 'w') as fh:
+ with contextlib.redirect_stdout(fh):
+ main(['-t', template, url])
+
+ # test
+ subprocess.check_call(compile(tmpdir), stdout=sys.stdout, stderr=sys.stderr)
+ return subprocess.check_output(command(tmpdir), stderr=sys.stderr)
+
+ def test_generate_py_arc088_b(self) -> None:
+ # arc088_b has a format with a binary string variable.
+ url = 'https://atcoder.jp/contests/arc088/tasks/arc088_b'
+ template = 'generate.py'
+ compile = lambda tmpdir: [sys.executable, '--version'] # nop
+ command = lambda tmpdir: [sys.executable, str(tmpdir / template)]
+
+ def validate(case: bytes) -> None:
+ lines = case.splitlines()
+ self.assertEqual(len(lines), 1)
+ s, = lines[0].split()
+ self.assertTrue(s.isalpha())
+
+ validate(self._helper(url=url, template=template, compile=compile, command=command))
+
+ def test_generate_py_arc089_b(self) -> None:
+ # arc089_b has a non-trivial format with char variables.
+ url = 'https://atcoder.jp/contests/arc089/tasks/arc089_b'
+ template = 'generate.py'
+ compile = lambda tmpdir: [sys.executable, '--version'] # nop
+ command = lambda tmpdir: [sys.executable, str(tmpdir / template)]
+
+ def validate(case: bytes) -> None:
+ lines = case.splitlines()
+ n, k = map(int, lines[0].split())
+ self.assertEqual(len(lines) - 1, n)
+ for line in lines[1:]:
+ x, y, c = line.split()
+ int(x)
+ int(y)
+ self.assertTrue(c.isalpha())
+ self.assertEqual(len(c), 1)
+
+ validate(self._helper(url=url, template=template, compile=compile, command=command))
|
The default template `generate.py` produces code with a syntax error when string variables exist in the input
```shell
oj-template -t generate.py https://atcoder.jp/contests/abc166/tasks/abc166_a
```
The code generated by the command above seems to contain a syntax error. The same thing happened with other problems where a string is given as input. Since I am not familiar with Python, I could not figure out what exactly is wrong.
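
For reference, the patch above pins the cause down to the generated list comprehension, which was missing a loop variable. A minimal sketch of the broken and fixed forms, matching the change in `onlinejudge_template/generator/python.py`:

```python
import random

# Broken form as previously generated -- a SyntaxError, because the
# comprehension has no loop variable:
#   s = ''.join([random.choice('abcde') for range(random.randint(1, 100))])

# Fixed form, as in the patch above, binding a throwaway variable `_`:
s = ''.join([random.choice('abcde') for _ in range(random.randint(1, 100))])
print(s)
```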
|
0.0
|
f0230727fb9e67eb20d3f5c537d1747bb9fb90b3
|
[
"tests/command_template.py::TestOJTemplateCommandGenerator::test_generate_py_arc088_b"
] |
[
"tests/command_template.py::TestOJTemplateCommand::test_main_py_abc152_b",
"tests/command_template.py::TestOJTemplateCommandGenerator::test_generate_py_arc089_b"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-19 04:45:29+00:00
|
mit
| 4,397
|
|
pavdmyt__yaspin-35
|
diff --git a/HISTORY.rst b/HISTORY.rst
index e42a4e8..f811286 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,6 +1,12 @@
Release History
===============
+0.14.3 / 2019-05-12
+-------------------
+
+* fix(#29): race condition between spinner thread and ``write()``
+
+
0.14.2 / 2019-04-27
-------------------
diff --git a/setup.cfg b/setup.cfg
index 414ae9c..710350c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
[bumpversion]
-current_version = 0.14.2
+current_version = 0.14.3
[metadata]
description-file = README.rst
diff --git a/yaspin/__version__.py b/yaspin/__version__.py
index 745162e..23f0070 100644
--- a/yaspin/__version__.py
+++ b/yaspin/__version__.py
@@ -1,1 +1,1 @@
-__version__ = "0.14.2"
+__version__ = "0.14.3"
diff --git a/yaspin/core.py b/yaspin/core.py
index f61bdc0..803daf7 100644
--- a/yaspin/core.py
+++ b/yaspin/core.py
@@ -76,6 +76,7 @@ class Yaspin(object):
self._hide_spin = None
self._spin_thread = None
self._last_frame = None
+ self._stdout_lock = threading.Lock()
# Signals
@@ -247,43 +248,47 @@ class Yaspin(object):
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and not self._hide_spin.is_set():
- # set the hidden spinner flag
- self._hide_spin.set()
+ with self._stdout_lock:
+ # set the hidden spinner flag
+ self._hide_spin.set()
- # clear the current line
- sys.stdout.write("\r")
- self._clear_line()
+ # clear the current line
+ sys.stdout.write("\r")
+ self._clear_line()
- # flush the stdout buffer so the current line can be rewritten to
- sys.stdout.flush()
+ # flush the stdout buffer so the current line
+ # can be rewritten to
+ sys.stdout.flush()
def show(self):
"""Show the hidden spinner."""
thr_is_alive = self._spin_thread and self._spin_thread.is_alive()
if thr_is_alive and self._hide_spin.is_set():
- # clear the hidden spinner flag
- self._hide_spin.clear()
+ with self._stdout_lock:
+ # clear the hidden spinner flag
+ self._hide_spin.clear()
- # clear the current line so the spinner is not appended to it
- sys.stdout.write("\r")
- self._clear_line()
+ # clear the current line so the spinner is not appended to it
+ sys.stdout.write("\r")
+ self._clear_line()
def write(self, text):
"""Write text in the terminal without breaking the spinner."""
# similar to tqdm.write()
# https://pypi.python.org/pypi/tqdm#writing-messages
- sys.stdout.write("\r")
- self._clear_line()
+ with self._stdout_lock:
+ sys.stdout.write("\r")
+ self._clear_line()
- _text = to_unicode(text)
- if PY2:
- _text = _text.encode(ENCODING)
+ _text = to_unicode(text)
+ if PY2:
+ _text = _text.encode(ENCODING)
- # Ensure output is bytes for Py2 and Unicode for Py3
- assert isinstance(_text, builtin_str)
+ # Ensure output is bytes for Py2 and Unicode for Py3
+ assert isinstance(_text, builtin_str)
- sys.stdout.write("{0}\n".format(_text))
+ sys.stdout.write("{0}\n".format(_text))
def ok(self, text="OK"):
"""Set Ok (success) finalizer to a spinner."""
@@ -306,7 +311,8 @@ class Yaspin(object):
# Should be stopped here, otherwise prints after
# self._freeze call will mess up the spinner
self.stop()
- sys.stdout.write(self._last_frame)
+ with self._stdout_lock:
+ sys.stdout.write(self._last_frame)
def _spin(self):
while not self._stop_spin.is_set():
@@ -321,9 +327,10 @@ class Yaspin(object):
out = self._compose_out(spin_phase)
# Write
- sys.stdout.write(out)
- self._clear_line()
- sys.stdout.flush()
+ with self._stdout_lock:
+ sys.stdout.write(out)
+ self._clear_line()
+ sys.stdout.flush()
# Wait
time.sleep(self._interval)
|
pavdmyt/yaspin
|
dd2fd2187fbdef81aede3ea6d2053c1ccd2d8034
|
diff --git a/tests/test_in_out.py b/tests/test_in_out.py
index 180b642..0fe6e49 100644
--- a/tests/test_in_out.py
+++ b/tests/test_in_out.py
@@ -8,7 +8,9 @@ Checks that all input data is converted to unicode.
And all output data is converted to builtin str type.
"""
+import re
import sys
+import time
import pytest
@@ -159,3 +161,22 @@ def test_hide_show(capsys, text, request):
# ensure that text was cleared before resuming the spinner
assert out[:4] == "\r\033[K"
+
+
+def test_spinner_write_race_condition(capsys):
+ # test that spinner text does not overwrite write() contents
+ # this generally happens when the spinner thread writes
+ # between write()'s \r and the text it actually wants to write
+
+ sp = yaspin(text="aaaa")
+ sp.start()
+ sp._interval = 0.0
+ start_time = time.time()
+ while time.time() - start_time < 3.0:
+ sp.write("bbbb")
+ sp.stop()
+
+ out, _ = capsys.readouterr()
+ assert "aaaa" in out # spinner text is present
+ assert "bbbb" in out # write() text is present
+ assert not re.search(r"aaaa[^\rb]*bbbb", out)
|
Race condition between spinner thread and write()
When I use `write()` a lot on a spinner, it often happens that the written text is displayed after the spinner message, like this:
```
[...]
noot noot
⠋ spinning...noot noot
noot noot
[...]
```
I used the script below to reproduce the problem. It takes a while (400-800 writes) with this script, but the issue shows up far more often in real-world applications.
```python
import random
import time
import yaspin
with yaspin.yaspin() as sp:
sp.text = 'spinning...'
while True:
sp.write('noot noot')
time.sleep(0.051 + random.randint(0, 10) / 100)
```
I think this happens because of concurrent access to `sys.stdout.write()` by the spinning thread and the spinner's `write()`. It should be solvable by using a `threading.Lock()` and guarding every access to `sys.stdout.write()` with it. In a quick local hack-fix test it worked for me when applied to `write()` and `_spin()`, but quite a few more functions would need guarding to do it properly.
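
A minimal sketch of that locking idea (the actual patch above applies it inside `Yaspin`; the function names here are only illustrative): both the spinner thread and `write()` take the same lock, so the `\r` + clear + text sequence can never be interleaved with a spinner frame.

```python
import sys
import threading

stdout_lock = threading.Lock()  # shared by the spinner thread and write()

def spin_frame(frame):
    # Called repeatedly from the spinner thread.
    with stdout_lock:
        sys.stdout.write(frame)
        sys.stdout.flush()

def write_line(text):
    # Called from user code, like Yaspin.write().
    with stdout_lock:
        sys.stdout.write("\r")      # return to the start of the line
        sys.stdout.write("\033[K")  # clear the current line
        sys.stdout.write(text + "\n")
        sys.stdout.flush()
```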
|
0.0
|
dd2fd2187fbdef81aede3ea6d2053c1ccd2d8034
|
[
"tests/test_in_out.py::test_spinner_write_race_condition"
] |
[
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'empty",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'empty",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['ascii",
"tests/test_in_out.py::test_repr['ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'non-ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'non-ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['non-ascii",
"tests/test_in_out.py::test_out_converted_to_builtin_str['non-ascii",
"tests/test_in_out.py::test_repr['non-ascii",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'List[]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[bytes]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[bytes]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'List[bytes]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[unicode]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[unicode]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'List[unicode]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'List[str]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'List[str]",
"tests/test_in_out.py::test_repr['empty'-'List[str]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[bytes]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[bytes]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[bytes]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-reversal-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-reversal-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-reversal-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-reversal-left]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-default-right]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-default-right]",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[unicode]'-default-left]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[unicode]'-default-left]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[unicode]']",
"tests/test_in_out.py::test_input_converted_to_unicode['empty'-'Tuple[str]",
"tests/test_in_out.py::test_out_converted_to_builtin_str['empty'-'Tuple[str]",
"tests/test_in_out.py::test_repr['empty'-'Tuple[str]",
"tests/test_in_out.py::test_write['non-ascii",
"tests/test_in_out.py::test_hide_show['non-ascii",
"tests/test_in_out.py::test_repr['empty'-'non-ascii",
"tests/test_in_out.py::test_write['ascii",
"tests/test_in_out.py::test_hide_show['ascii",
"tests/test_in_out.py::test_repr['empty'-'ascii",
"tests/test_in_out.py::test_repr['empty'-'empty",
"tests/test_in_out.py::test_write['empty']",
"tests/test_in_out.py::test_hide_show['empty']",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[white-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_white-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[cyan-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_cyan-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-bold,",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-bold,",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[magenta-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_magenta-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[blue-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_blue-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-concealed]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_green-bold]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[yellow-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-blink]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_yellow-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_red-reverse]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-blink]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-underline]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-dark]",
"tests/test_in_out.py::test_compose_out_with_color[green-on_red-bold]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-underline]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-dark]",
"tests/test_in_out.py::test_compose_out_with_color[red-on_green-bold]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-04-18 15:28:04+00:00
|
mit
| 4,465
|
|
jaraco__path-216
|
diff --git a/newsfragments/216.feature.rst b/newsfragments/216.feature.rst
new file mode 100644
index 0000000..ffa5f9d
--- /dev/null
+++ b/newsfragments/216.feature.rst
@@ -0,0 +1,1 @@
+Use '.' as the default path.
\ No newline at end of file
diff --git a/path/__init__.py b/path/__init__.py
index 7d46d1d..2283175 100644
--- a/path/__init__.py
+++ b/path/__init__.py
@@ -142,7 +142,10 @@ class Path(str):
.. seealso:: :mod:`os.path`
"""
- def __init__(self, other=''):
+ def __new__(cls, other='.'):
+ return super().__new__(cls, other)
+
+ def __init__(self, other='.'):
if other is None:
raise TypeError("Invalid initial value for path: None")
with contextlib.suppress(AttributeError):
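
The patch changes the default from `''` to `'.'` in both `__new__` and `__init__`: because `Path` subclasses the immutable `str`, its value is fixed when the instance is allocated in `__new__`, so overriding `__init__` alone could not change what `Path()` evaluates to. A minimal standalone sketch of that mechanism (the class `P` is a throwaway stand-in, not the real `path.Path`):

```python
# Illustration of why a str subclass needs __new__ to change its default
# value; `P` is an illustrative stand-in, not the real path.Path class.
class P(str):
    def __new__(cls, other='.'):
        # the string value is fixed here, at allocation time
        return super().__new__(cls, other)

print(P())        # prints "."
print(P('/tmp'))  # prints "/tmp"
```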
|
jaraco/path
|
54215092dd970c667c7234c6da5bfa0e3ad7ab89
|
diff --git a/test_path.py b/test_path.py
index d105369..0bc0440 100644
--- a/test_path.py
+++ b/test_path.py
@@ -78,6 +78,12 @@ class TestBasics:
d = Path('D:\\')
assert d.relpathto(boz) == boz
+ def test_construction_without_args(self):
+ """
+ Path class will construct a path to current directory when called with no arguments.
+ """
+ assert Path() == '.'
+
def test_construction_from_none(self):
""" """
with pytest.raises(TypeError):
@@ -424,7 +430,7 @@ def test_chroot(monkeypatch):
results = []
monkeypatch.setattr(os, 'chroot', results.append)
Path().chroot()
- assert results == ['']
+ assert results == [Path()]
@pytest.mark.skipif("not hasattr(Path, 'startfile')")
@@ -432,7 +438,7 @@ def test_startfile(monkeypatch):
results = []
monkeypatch.setattr(os, 'startfile', results.append)
Path().startfile()
- assert results == ['']
+ assert results == [Path()]
class TestScratchDir:
|
`path.Path().files()` fails with `FileNotFoundError`
```python
>>> import path, pathlib
>>> pathlib.Path()
WindowsPath('.')
>>> path.Path()
Path('')
>>> list(pathlib.Path().iterdir())
[WindowsPath('.git'),
WindowsPath('.idea'),
WindowsPath('LICENSE'),
WindowsPath('pyproject.toml'),
WindowsPath('README.rst'),
WindowsPath('setup.cfg')]
>>> path.Path().files()
Traceback (most recent call last):
File "C:\Users\a\AppData\Local\Programs\Python\Python311\Lib\site-packages\IPython\core\interactiveshell.py", line 3505, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-43-109000f98fc0>", line 1, in <module>
path.Path().files()
File "C:\Users\a\AppData\Local\Programs\Python\Python311\Lib\site-packages\path\__init__.py", line 514, in files
return [p for p in self.listdir(*args, **kwargs) if p.isfile()]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\a\AppData\Local\Programs\Python\Python311\Lib\site-packages\path\__init__.py", line 492, in listdir
return list(filter(match, (self / child for child in os.listdir(self))))
^^^^^^^^^^^^^^^^
FileNotFoundError: [WinError 3] The system cannot find the path specified: Path('')
>>> path.Path('.').files()
[Path('.\\LICENSE'),
Path('.\\pyproject.toml'),
Path('.\\README.rst'),
Path('.\\setup.cfg')]
```
I expect `path.Path().files()` to work just like `path.Path('.').files()`, but it currently raises an error. Is that intentional or is it a bug?
This is not a Windows-specific issue. There is a similar error on my Linux machine: `FileNotFoundError: [Errno 2] No such file or directory: Path('')`.
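
A workaround consistent with the session above (an inferred stopgap, not part of the original report) is to name the current directory explicitly until a default of `'.'` is available:

```python
import os
import path

# Passing '.' (or os.curdir, which is the same string) explicitly gives
# listdir()/files() a real directory name instead of the empty string.
print(path.Path('.').files())
print(path.Path(os.curdir).files())
```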
|
0.0
|
54215092dd970c667c7234c6da5bfa0e3ad7ab89
|
[
"test_path.py::TestBasics::test_construction_without_args"
] |
[
"test_path.py::TestBasics::test_fspath",
"test_path.py::TestBasics::test_joinpath_returns_same_type",
"test_path.py::TestBasics::test_construction_from_none",
"test_path.py::TestBasics::test_joinpath_to_nothing",
"test_path.py::TestBasics::test_construction_from_int",
"test_path.py::TestBasics::test_properties",
"test_path.py::TestPatternMatching::test_fnmatch_custom_normcase",
"test_path.py::TestBasics::test_walk_errors",
"test_path.py::TestSpecialPaths::test_reused_SpecialResolver",
"test_path.py::TestBasics::test_relpath",
"test_path.py::TestBasics::test_methods",
"test_path.py::TestBasics::test_utime",
"test_path.py::TestSpecialPaths::test_other_parameters",
"test_path.py::TestBasics::test_read_md5",
"test_path.py::TestBasics::test_string_compatibility",
"test_path.py::TestBasics::test_expandvars",
"test_path.py::TestBasics::test_radd_string",
"test_path.py::TestBasics::test_explicit_module_classes",
"test_path.py::TestMultiPath::test_iteration",
"test_path.py::TestBasics::test_explicit_module",
"test_path.py::TestLinks::test_symlink_none",
"test_path.py::test_chroot",
"test_path.py::TestTempDir::test_next_class",
"test_path.py::TestMultiPath::test_for_class",
"test_path.py::TestMultiPath::test_detect_with_pathsep",
"test_path.py::TestTempDir::test_context_manager_using_with",
"test_path.py::TestBasics::test_joinpath_fails_on_empty",
"test_path.py::TestBasics::test_statvfs",
"test_path.py::TestBasics::test_joinpath_on_instance",
"test_path.py::TestTempDir::test_constructor",
"test_path.py::ruff",
"test_path.py::TestTempDir::test_context_manager",
"test_path.py::TestTempDir::test_cleaned_up_on_interrupt",
"test_path.py::TestPatternMatching::test_fnmatch_simple",
"test_path.py::TestSubclass::test_subclass_produces_same_class",
"test_path.py::TestBasics::test_joinpath_on_class",
"test_path.py::TestLinks::test_link",
"test_path.py::TestPatternMatching::test_fnmatch_custom_mod",
"test_path.py::TestBasics::test_normpath",
"test_path.py::TestBasics::test_expand",
"test_path.py::TestPatternMatching::test_listdir_empty_pattern",
"test_path.py::TestBasics::test_pathconf",
"test_path.py::TestSelfReturn::test_makedirs_p",
"test_path.py::TestBasics::test_chown",
"test_path.py::TestSelfReturn::test_makedirs_p_extant",
"test_path.py::TestSelfReturn::test_mkdir",
"test_path.py::TestBasics::test_read_hexhash",
"test_path.py::TestScratchDir::test_rmtree_p",
"test_path.py::TestHandlers::test_raise",
"test_path.py::TestReadWriteText::test_write_text_bytes",
"test_path.py::TestBasics::test_mkdir_p",
"test_path.py::TestOwnership::test_get_owner",
"test_path.py::TestSpecialPaths::test_property",
"test_path.py::TestBasics::test_removedirs_p",
"test_path.py::TestMultiPath::test_detect_no_pathsep",
"test_path.py::TestReadWriteText::test_read_write",
"test_path.py::TestBasics::test_relpathto",
"test_path.py::TestSpecialPaths::test_basic_paths",
"test_path.py::TestLinks::test_readlinkabs_rendered",
"test_path.py::TestSelfReturn::test_touch",
"test_path.py::TestBasics::test_walk_child_error",
"test_path.py::TestHandlers::test_invalid_handler",
"test_path.py::TestSelfReturn::test_rename",
"test_path.py::TestBasics::test_splitdrive",
"test_path.py::TestScratchDir::test_unicode[UTF-16]",
"test_path.py::TestHandlers::test_ignore",
"test_path.py::TestSymbolicLinksWalk::test_skip_symlinks",
"test_path.py::TestLinks::test_readlinkabs_passthrough",
"test_path.py::TestScratchDir::test_context_manager",
"test_path.py::TestHandlers::test_warn",
"test_path.py::TestBasics::test_renames",
"test_path.py::TestMergeTree::test_with_nonexisting_dst_kwargs",
"test_path.py::TestChdir::test_chdir_or_cd",
"test_path.py::TestScratchDir::test_rmdir_p_sub_sub_dir",
"test_path.py::TestPatternMatching::test_listdir_custom_module",
"test_path.py::TestScratchDir::test_rmdir_p_exists",
"test_path.py::TestScratchDir::test_rmdir_p_nonexistent",
"test_path.py::TestMergeTree::test_copytree_parameters",
"test_path.py::TestSpecialPaths::test_multipath",
"test_path.py::TestScratchDir::test_chunks",
"test_path.py::TestInPlace::test_write_mode_invalid",
"test_path.py::TestUnicode::test_walkdirs_with_unicode_name",
"test_path.py::TestScratchDir::test_shutil",
"test_path.py::TestPatternMatching::test_listdir_case_insensitive",
"test_path.py::TestMergeTree::test_only_newer",
"test_path.py::TestScratchDir::test_rmtree_p_nonexistent",
"test_path.py::TestPatternMatching::test_listdir_patterns",
"test_path.py::TestScratchDir::test_samefile",
"test_path.py::TestBasics::test_chmod_str",
"test_path.py::TestMergeTree::test_with_nonexisting_dst_args",
"test_path.py::TestScratchDir::test_listdir_other_encoding",
"test_path.py::TestScratchDir::test_makedirs",
"test_path.py::TestInPlace::test_exception_in_context",
"test_path.py::TestScratchDir::test_unicode[UTF-8]",
"test_path.py::TestInPlace::test_line_by_line_rewrite",
"test_path.py::TestSpecialPaths::test_unix_paths_fallback",
"test_path.py::TestPatternMatching::test_walk_case_insensitive",
"test_path.py::TestScratchDir::test_listing",
"test_path.py::TestScratchDir::test_patterns",
"test_path.py::TestScratchDir::test_unicode[UTF-16LE]",
"test_path.py::TestScratchDir::test_unicode[UTF-16BE]",
"test_path.py::TestMergeTree::test_nested",
"test_path.py::TestMergeTree::test_with_existing_dst",
"test_path.py::TestSpecialPaths::test_unix_paths",
"test_path.py::test_no_dependencies",
"test_path.py::TestPerformance::test_import_time",
"test_path.py::BLACK",
"test_path.py::mypy",
"test_path.py::mypy-status",
"test_path.py::TestScratchDir::test_touch"
] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-07 02:09:58+00:00
|
mit
| 3,218
|
|
dgilland__pydash-164
|
diff --git a/AUTHORS.rst b/AUTHORS.rst
index a1843ad..d167ec9 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -24,3 +24,4 @@ Contributors
- Gonzalo Naveira, `gonzalonaveira@github <https://github.com/gonzalonaveira>`_
- Wenbo Zhao, [email protected], `zhaowb@github <https://github.com/zhaowb>`_
- Mervyn Lee, `mervynlee94@github <https://github.com/mervynlee94>`_
+- Weineel Lee, `weineel@github <https://github.com/weineel>`_
diff --git a/src/pydash/objects.py b/src/pydash/objects.py
index f3bed2b..6949e8e 100644
--- a/src/pydash/objects.py
+++ b/src/pydash/objects.py
@@ -934,7 +934,7 @@ def _merge_with(obj, *sources, **kwargs):
if _result is not None:
result = _result
elif all_sequences or all_mappings:
- result = _merge_with(obj_value, src_value, _setter=setter)
+ result = _merge_with(obj_value, src_value, iteratee=iteratee, _setter=setter)
else:
result = src_value
|
dgilland/pydash
|
fa61732c01b39cec0de66f958cef27e7f31bcac2
|
diff --git a/tests/test_objects.py b/tests/test_objects.py
index 2217438..b218043 100644
--- a/tests/test_objects.py
+++ b/tests/test_objects.py
@@ -573,11 +573,11 @@ def test_merge_no_link_list():
[
(
(
- {"fruits": ["apple"], "vegetables": ["beet"]},
- {"fruits": ["banana"], "vegetables": ["carrot"]},
- lambda a, b: a + b if isinstance(a, list) else b,
+ {"fruits": ["apple"], "others": {"vegetables": ["beet"]}},
+ {"fruits": ["banana"], "others": {"vegetables": ["carrot"]}},
+ lambda a, b: a + b if isinstance(a, list) else None,
),
- {"fruits": ["apple", "banana"], "vegetables": ["beet", "carrot"]},
+ {"fruits": ["apple", "banana"], "others": {"vegetables": ["beet", "carrot"]}},
),
],
)
|
merge_with: the iteratee is lost when _merge_with recurses.
```python
from pydash import difference, merge_with

def merge_array(obj_val, src_val):
    if isinstance(obj_val, list) and isinstance(src_val, list):
        return difference(obj_val + src_val)

merge_with({ 'd': { 'x': [2] } }, { 'd': { 'x': [5] } }, merge_array)
```
### expect
```
{ 'd': { 'x': [2, 5] } }
```
### actual
```
{ 'd': { 'x': [5] } }
```
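
The report boils down to the recursive call inside `_merge_with` dropping the `iteratee` keyword, so nested values fall back to the default merge and the custom list handling never runs. A standalone sketch of the intended behaviour, written as a generic deep merge rather than pydash's real implementation (all names here are illustrative):

```python
# Generic deep merge that threads the iteratee through recursion --
# illustrative only, not pydash's actual _merge_with.
def deep_merge_with(dst, src, iteratee):
    for key, src_val in src.items():
        dst_val = dst.get(key)
        if isinstance(dst_val, dict) and isinstance(src_val, dict):
            # forwarding `iteratee` on recursion is exactly what the fix restores
            deep_merge_with(dst_val, src_val, iteratee)
        else:
            result = iteratee(dst_val, src_val)
            dst[key] = src_val if result is None else result
    return dst

obj = {'d': {'x': [2]}}
print(deep_merge_with(obj, {'d': {'x': [5]}},
                      lambda a, b: a + b if isinstance(a, list) else None))
# {'d': {'x': [2, 5]}}
```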
|
0.0
|
fa61732c01b39cec0de66f958cef27e7f31bcac2
|
[
"tests/test_objects.py::test_merge_with[case0-expected0]"
] |
[
"tests/test_objects.py::test_assign[case0-expected0]",
"tests/test_objects.py::test_assign[case1-expected1]",
"tests/test_objects.py::test_assign_with[case0-expected0]",
"tests/test_objects.py::test_callables[case0-expected0]",
"tests/test_objects.py::test_callables[case1-expected1]",
"tests/test_objects.py::test_clone[case0]",
"tests/test_objects.py::test_clone[case1]",
"tests/test_objects.py::test_clone_with[case0-<lambda>-expected0]",
"tests/test_objects.py::test_clone_with[case1-<lambda>-expected1]",
"tests/test_objects.py::test_clone_deep[case0]",
"tests/test_objects.py::test_clone_deep[case1]",
"tests/test_objects.py::test_clone_deep[case2]",
"tests/test_objects.py::test_clone_deep_with[case0-<lambda>-expected0]",
"tests/test_objects.py::test_clone_deep_with[case1-<lambda>-expected1]",
"tests/test_objects.py::test_clone_deep_with[case2-<lambda>-expected2]",
"tests/test_objects.py::test_clone_deep_with[a-<lambda>-a]",
"tests/test_objects.py::test_defaults[case0-expected0]",
"tests/test_objects.py::test_defaults_deep[case0-expected0]",
"tests/test_objects.py::test_defaults_deep[case1-expected1]",
"tests/test_objects.py::test_defaults_deep[case2-expected2]",
"tests/test_objects.py::test_defaults_deep[case3-expected3]",
"tests/test_objects.py::test_defaults_deep[case4-expected4]",
"tests/test_objects.py::test_to_dict[case0-expected0]",
"tests/test_objects.py::test_to_dict[case1-expected1]",
"tests/test_objects.py::test_invert[case0-expected0]",
"tests/test_objects.py::test_invert[case1-expected1]",
"tests/test_objects.py::test_invert_by[case0-expected0]",
"tests/test_objects.py::test_invert_by[case1-expected1]",
"tests/test_objects.py::test_invert_by[case2-expected2]",
"tests/test_objects.py::test_invoke[case0-1]",
"tests/test_objects.py::test_invoke[case1-2]",
"tests/test_objects.py::test_invoke[case2-None]",
"tests/test_objects.py::test_find_key[case0-expected0]",
"tests/test_objects.py::test_find_key[case1-expected1]",
"tests/test_objects.py::test_find_key[case2-expected2]",
"tests/test_objects.py::test_find_last_key[case0-expected0]",
"tests/test_objects.py::test_find_last_key[case1-expected1]",
"tests/test_objects.py::test_find_last_key[case2-expected2]",
"tests/test_objects.py::test_for_in[case0-expected0]",
"tests/test_objects.py::test_for_in[case1-expected1]",
"tests/test_objects.py::test_for_in[case2-expected2]",
"tests/test_objects.py::test_for_in_right[case0-expected0]",
"tests/test_objects.py::test_for_in_right[case1-expected1]",
"tests/test_objects.py::test_for_in_right[case2-expected2]",
"tests/test_objects.py::test_get[case0-expected0]",
"tests/test_objects.py::test_get[case1-4]",
"tests/test_objects.py::test_get[case2-expected2]",
"tests/test_objects.py::test_get[case3-4]",
"tests/test_objects.py::test_get[case4-None]",
"tests/test_objects.py::test_get[case5-expected5]",
"tests/test_objects.py::test_get[case6-expected6]",
"tests/test_objects.py::test_get[case7-expected7]",
"tests/test_objects.py::test_get[case8-None]",
"tests/test_objects.py::test_get[case9-None]",
"tests/test_objects.py::test_get[case10-2]",
"tests/test_objects.py::test_get[case11-2]",
"tests/test_objects.py::test_get[case12-expected12]",
"tests/test_objects.py::test_get[case13-expected13]",
"tests/test_objects.py::test_get[case14-haha]",
"tests/test_objects.py::test_get[case15-haha]",
"tests/test_objects.py::test_get[case16-None]",
"tests/test_objects.py::test_get[case17-5]",
"tests/test_objects.py::test_get[case18-5]",
"tests/test_objects.py::test_get[case19-5]",
"tests/test_objects.py::test_get[case20-4]",
"tests/test_objects.py::test_get[case21-5]",
"tests/test_objects.py::test_get[case22-42]",
"tests/test_objects.py::test_get[case23-42]",
"tests/test_objects.py::test_get[case24-42]",
"tests/test_objects.py::test_get[case25-42]",
"tests/test_objects.py::test_get[case26-value]",
"tests/test_objects.py::test_get[case27-expected27]",
"tests/test_objects.py::test_get[case28-None]",
"tests/test_objects.py::test_get[case29-1]",
"tests/test_objects.py::test_get[case30-1]",
"tests/test_objects.py::test_get[case31-1]",
"tests/test_objects.py::test_get[case32-None]",
"tests/test_objects.py::test_get[case33-None]",
"tests/test_objects.py::test_get[case34-expected34]",
"tests/test_objects.py::test_get[case35-3]",
"tests/test_objects.py::test_get[case36-1]",
"tests/test_objects.py::test_get[case37-1]",
"tests/test_objects.py::test_get[case38-John",
"tests/test_objects.py::test_get__should_not_populate_defaultdict",
"tests/test_objects.py::test_has[case0-True]",
"tests/test_objects.py::test_has[case1-True]",
"tests/test_objects.py::test_has[case2-True]",
"tests/test_objects.py::test_has[case3-False]",
"tests/test_objects.py::test_has[case4-True]",
"tests/test_objects.py::test_has[case5-True]",
"tests/test_objects.py::test_has[case6-True]",
"tests/test_objects.py::test_has[case7-False]",
"tests/test_objects.py::test_has[case8-True]",
"tests/test_objects.py::test_has[case9-True]",
"tests/test_objects.py::test_has[case10-True]",
"tests/test_objects.py::test_has[case11-True]",
"tests/test_objects.py::test_has[case12-False]",
"tests/test_objects.py::test_has[case13-False]",
"tests/test_objects.py::test_has[case14-False]",
"tests/test_objects.py::test_has[case15-False]",
"tests/test_objects.py::test_has[case16-True]",
"tests/test_objects.py::test_has[case17-True]",
"tests/test_objects.py::test_has[case18-True]",
"tests/test_objects.py::test_has[case19-True]",
"tests/test_objects.py::test_has[case20-True]",
"tests/test_objects.py::test_has__should_not_populate_defaultdict",
"tests/test_objects.py::test_keys[case0-expected0]",
"tests/test_objects.py::test_keys[case1-expected1]",
"tests/test_objects.py::test_map_values[case0-expected0]",
"tests/test_objects.py::test_map_values[case1-expected1]",
"tests/test_objects.py::test_map_values_deep[case0-expected0]",
"tests/test_objects.py::test_map_values_deep[case1-expected1]",
"tests/test_objects.py::test_merge[case0-expected0]",
"tests/test_objects.py::test_merge[case1-expected1]",
"tests/test_objects.py::test_merge[case2-expected2]",
"tests/test_objects.py::test_merge[case3-expected3]",
"tests/test_objects.py::test_merge[case4-expected4]",
"tests/test_objects.py::test_merge[case5-expected5]",
"tests/test_objects.py::test_merge[case6-expected6]",
"tests/test_objects.py::test_merge[case7-expected7]",
"tests/test_objects.py::test_merge[case8-expected8]",
"tests/test_objects.py::test_merge[case9-expected9]",
"tests/test_objects.py::test_merge[case10-None]",
"tests/test_objects.py::test_merge[case11-None]",
"tests/test_objects.py::test_merge[case12-None]",
"tests/test_objects.py::test_merge[case13-expected13]",
"tests/test_objects.py::test_merge[case14-expected14]",
"tests/test_objects.py::test_merge[case15-expected15]",
"tests/test_objects.py::test_merge_no_link_dict",
"tests/test_objects.py::test_merge_no_link_list",
"tests/test_objects.py::test_omit[case0-expected0]",
"tests/test_objects.py::test_omit[case1-expected1]",
"tests/test_objects.py::test_omit[case2-expected2]",
"tests/test_objects.py::test_omit[case3-expected3]",
"tests/test_objects.py::test_omit[case4-expected4]",
"tests/test_objects.py::test_omit[case5-expected5]",
"tests/test_objects.py::test_omit[case6-expected6]",
"tests/test_objects.py::test_omit[case7-expected7]",
"tests/test_objects.py::test_omit[case8-expected8]",
"tests/test_objects.py::test_omit_by[case0-expected0]",
"tests/test_objects.py::test_omit_by[case1-expected1]",
"tests/test_objects.py::test_omit_by[case2-expected2]",
"tests/test_objects.py::test_omit_by[case3-expected3]",
"tests/test_objects.py::test_parse_int[case0-1]",
"tests/test_objects.py::test_parse_int[case1-1]",
"tests/test_objects.py::test_parse_int[case2-1]",
"tests/test_objects.py::test_parse_int[case3-1]",
"tests/test_objects.py::test_parse_int[case4-11]",
"tests/test_objects.py::test_parse_int[case5-10]",
"tests/test_objects.py::test_parse_int[case6-8]",
"tests/test_objects.py::test_parse_int[case7-16]",
"tests/test_objects.py::test_parse_int[case8-10]",
"tests/test_objects.py::test_parse_int[case9-None]",
"tests/test_objects.py::test_pick[case0-expected0]",
"tests/test_objects.py::test_pick[case1-expected1]",
"tests/test_objects.py::test_pick[case2-expected2]",
"tests/test_objects.py::test_pick[case3-expected3]",
"tests/test_objects.py::test_pick[case4-expected4]",
"tests/test_objects.py::test_pick[case5-expected5]",
"tests/test_objects.py::test_pick[case6-expected6]",
"tests/test_objects.py::test_pick[case7-expected7]",
"tests/test_objects.py::test_pick[case8-expected8]",
"tests/test_objects.py::test_pick[case9-expected9]",
"tests/test_objects.py::test_pick[case10-expected10]",
"tests/test_objects.py::test_pick_by[case0-expected0]",
"tests/test_objects.py::test_pick_by[case1-expected1]",
"tests/test_objects.py::test_pick_by[case2-expected2]",
"tests/test_objects.py::test_pick_by[case3-expected3]",
"tests/test_objects.py::test_pick_by[case4-expected4]",
"tests/test_objects.py::test_pick_by[case5-expected5]",
"tests/test_objects.py::test_pick_by[case6-expected6]",
"tests/test_objects.py::test_rename_keys[case0-expected0]",
"tests/test_objects.py::test_rename_keys[case1-expected1]",
"tests/test_objects.py::test_rename_keys[case2-expected2]",
"tests/test_objects.py::test_set_[case0-expected0]",
"tests/test_objects.py::test_set_[case1-expected1]",
"tests/test_objects.py::test_set_[case2-expected2]",
"tests/test_objects.py::test_set_[case3-expected3]",
"tests/test_objects.py::test_set_[case4-expected4]",
"tests/test_objects.py::test_set_[case5-expected5]",
"tests/test_objects.py::test_set_[case6-expected6]",
"tests/test_objects.py::test_set_[case7-expected7]",
"tests/test_objects.py::test_set_[case8-expected8]",
"tests/test_objects.py::test_set_[case9-expected9]",
"tests/test_objects.py::test_set_[case10-expected10]",
"tests/test_objects.py::test_set_[case11-expected11]",
"tests/test_objects.py::test_set_[case12-expected12]",
"tests/test_objects.py::test_set_[case13-expected13]",
"tests/test_objects.py::test_set_[case14-expected14]",
"tests/test_objects.py::test_set_with[case0-expected0]",
"tests/test_objects.py::test_set_with[case1-expected1]",
"tests/test_objects.py::test_set_with[case2-expected2]",
"tests/test_objects.py::test_set_with[case3-expected3]",
"tests/test_objects.py::test_to_boolean[case0-True]",
"tests/test_objects.py::test_to_boolean[case1-False]",
"tests/test_objects.py::test_to_boolean[case2-True]",
"tests/test_objects.py::test_to_boolean[case3-True]",
"tests/test_objects.py::test_to_boolean[case4-False]",
"tests/test_objects.py::test_to_boolean[case5-False]",
"tests/test_objects.py::test_to_boolean[case6-None]",
"tests/test_objects.py::test_to_boolean[case7-None]",
"tests/test_objects.py::test_to_boolean[case8-False]",
"tests/test_objects.py::test_to_boolean[case9-True]",
"tests/test_objects.py::test_to_boolean[case10-False]",
"tests/test_objects.py::test_to_boolean[case11-True]",
"tests/test_objects.py::test_to_boolean[case12-False]",
"tests/test_objects.py::test_to_boolean[case13-False]",
"tests/test_objects.py::test_to_boolean[case14-True]",
"tests/test_objects.py::test_to_boolean[case15-False]",
"tests/test_objects.py::test_to_boolean[case16-True]",
"tests/test_objects.py::test_to_boolean[case17-None]",
"tests/test_objects.py::test_to_boolean[case18-False]",
"tests/test_objects.py::test_to_boolean[case19-None]",
"tests/test_objects.py::test_to_integer[1.4-1_0]",
"tests/test_objects.py::test_to_integer[1.9-1_0]",
"tests/test_objects.py::test_to_integer[1.4-1_1]",
"tests/test_objects.py::test_to_integer[1.9-1_1]",
"tests/test_objects.py::test_to_integer[foo-0]",
"tests/test_objects.py::test_to_integer[None-0]",
"tests/test_objects.py::test_to_integer[True-1]",
"tests/test_objects.py::test_to_integer[False-0]",
"tests/test_objects.py::test_to_integer[case8-0]",
"tests/test_objects.py::test_to_integer[case9-0]",
"tests/test_objects.py::test_to_integer[case10-0]",
"tests/test_objects.py::test_to_number[case0-3.0]",
"tests/test_objects.py::test_to_number[case1-2.6]",
"tests/test_objects.py::test_to_number[case2-990.0]",
"tests/test_objects.py::test_to_number[case3-None]",
"tests/test_objects.py::test_to_pairs[case0-expected0]",
"tests/test_objects.py::test_to_pairs[case1-expected1]",
"tests/test_objects.py::test_to_string[1-1]",
"tests/test_objects.py::test_to_string[1.25-1.25]",
"tests/test_objects.py::test_to_string[True-True]",
"tests/test_objects.py::test_to_string[case3-[1]]",
"tests/test_objects.py::test_to_string[d\\xc3\\xa9j\\xc3\\xa0",
"tests/test_objects.py::test_to_string[-]",
"tests/test_objects.py::test_to_string[None-]",
"tests/test_objects.py::test_to_string[case7-2024-08-02]",
"tests/test_objects.py::test_transform[case0-expected0]",
"tests/test_objects.py::test_transform[case1-expected1]",
"tests/test_objects.py::test_transform[case2-expected2]",
"tests/test_objects.py::test_update[case0-expected0]",
"tests/test_objects.py::test_update[case1-expected1]",
"tests/test_objects.py::test_update[case2-expected2]",
"tests/test_objects.py::test_update_with[case0-expected0]",
"tests/test_objects.py::test_update_with[case1-expected1]",
"tests/test_objects.py::test_update_with[case2-expected2]",
"tests/test_objects.py::test_unset[obj0-a.0.b.c-True-new_obj0]",
"tests/test_objects.py::test_unset[obj1-1-True-new_obj1]",
"tests/test_objects.py::test_unset[obj2-1-True-new_obj2]",
"tests/test_objects.py::test_unset[obj3-path3-True-new_obj3]",
"tests/test_objects.py::test_unset[obj4-[0][0]-False-new_obj4]",
"tests/test_objects.py::test_unset[obj5-[0][0][0]-False-new_obj5]",
"tests/test_objects.py::test_values[case0-expected0]",
"tests/test_objects.py::test_values[case1-expected1]"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-26 09:07:43+00:00
|
mit
| 1,911
|
|
guykisel__inline-plz-228
|
diff --git a/inlineplz/linters/__init__.py b/inlineplz/linters/__init__.py
index 7fede16..a0fd9a4 100644
--- a/inlineplz/linters/__init__.py
+++ b/inlineplz/linters/__init__.py
@@ -100,9 +100,9 @@ LINTERS = {
'install': [['npm', 'install', 'eslint']],
'help': [os.path.normpath('./node_modules/.bin/eslint'), '-h'],
'run':
- [os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'json'],
+ [os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'unix'],
'rundefault': [
- os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'json',
+ os.path.normpath('./node_modules/.bin/eslint'), '.', '-f', 'unix',
'-c', '{config_dir}/.eslintrc.js', '--ignore-path', '{config_dir}/.eslintignore'
],
'dotfiles': [
diff --git a/inlineplz/linters/config/.eslintignore b/inlineplz/linters/config/.eslintignore
index 6713aaf..ce2175e 100644
--- a/inlineplz/linters/config/.eslintignore
+++ b/inlineplz/linters/config/.eslintignore
@@ -1,10 +1,10 @@
-coverage/**
-docs/**
-jsdoc/**
-templates/**
-tmp/**
-vendor/**
-src/**
-dist/**
-node_modules/**
+**/coverage/**
+**/docs/**
+**/jsdoc/**
+**/templates/**
+**/tmp/**
+**/vendor/**
+**/src/**
+**/dist/**
**/node_modules/**
+**/.tox/**
diff --git a/inlineplz/parsers/eslint.py b/inlineplz/parsers/eslint.py
index 3d0e556..972ae1e 100644
--- a/inlineplz/parsers/eslint.py
+++ b/inlineplz/parsers/eslint.py
@@ -12,14 +12,14 @@ class ESLintParser(ParserBase):
def parse(self, lint_data):
messages = set()
- for filedata in json.loads(lint_data):
- if filedata.get('messages'):
- for msgdata in filedata['messages']:
- try:
- path = filedata['filePath']
- line = msgdata['line']
- msgbody = msgdata['message']
- messages.add((path, line, msgbody))
- except (ValueError, KeyError):
- print('Invalid message: {0}'.format(msgdata))
+ for line in lint_data.split('\n'):
+ try:
+ parts = line.split(':')
+ if line.strip() and parts:
+ path = parts[0].strip()
+ line = int(parts[1].strip())
+ msgbody = ':'.join(parts[3:]).strip()
+ messages.add((path, line, msgbody))
+ except (ValueError, IndexError):
+ print('Invalid message: {0}'.format(line))
return messages
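
The linter definition now asks ESLint for its `unix` formatter, and the parser reads one `path:line:column: message` record per line instead of a JSON document. A standalone sketch of that line format and parsing approach (sample line adapted from the new test fixture; this is not inline-plz's exact parser):

```python
# Sketch of parsing ESLint "unix" formatter output (path:line:column: message).
# Illustrative only; the real parser lives in inlineplz/parsers/eslint.py.
def parse_unix_lines(lint_output):
    messages = set()
    for raw in lint_output.splitlines():
        parts = raw.split(':')
        if not raw.strip() or len(parts) < 4:
            continue  # skip blanks and summary lines with no location
        try:
            filepath = parts[0].strip()
            lineno = int(parts[1].strip())
        except ValueError:
            continue
        msgbody = ':'.join(parts[3:]).strip()
        messages.add((filepath, lineno, msgbody))
    return messages

sample = "/tmp/fullOfProblems.js:1:10: 'addOne' is defined but never used. [Error/no-unused-vars]"
print(parse_unix_lines(sample))
# {('/tmp/fullOfProblems.js', 1, "'addOne' is defined but never used. [Error/no-unused-vars]")}
```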
|
guykisel/inline-plz
|
dc293c43edd1609683294660fb7c6a0840fb24ea
|
diff --git a/tests/parsers/test_eslint.py b/tests/parsers/test_eslint.py
index 8255168..780af9f 100644
--- a/tests/parsers/test_eslint.py
+++ b/tests/parsers/test_eslint.py
@@ -18,6 +18,6 @@ eslint_path = os.path.join(
def test_eslint():
with codecs.open(eslint_path, encoding='utf-8', errors='replace') as inputfile:
messages = sorted(list(eslint.ESLintParser().parse(inputfile.read())))
- assert messages[0][2] == 'Parsing error: Illegal return statement'
- assert messages[0][1] == 17
- assert messages[0][0] == 'C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\asi.js'
+ assert messages[0][2] == "'addOne' is defined but never used. [Error/no-unused-vars]"
+ assert messages[0][1] == 1
+ assert messages[0][0] == '/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js'
diff --git a/tests/testdata/parsers/eslint.txt b/tests/testdata/parsers/eslint.txt
index 27a5040..04d345a 100644
--- a/tests/testdata/parsers/eslint.txt
+++ b/tests/testdata/parsers/eslint.txt
@@ -1,1 +1,9 @@
-[{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\data\\ascii-identifier-data.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\data\\non-ascii-identifier-part-only.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\data\\non-ascii-identifier-start.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\dist\\jshint-rhino.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\dist\\jshint.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\examples\\reporter.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\scripts\\build.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\scripts\\generate-identifier-data.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\cli.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\jshint.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\lex.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\messages.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\name-stack.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\options.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\platforms\\rhino.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reg.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\checkstyle.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\default.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\jslint_xml.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\non_error.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\reporters\\unix.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\scope-manager.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\state.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\style.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\src\\vars.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\browser.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\cli.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\browser\\fixture-fs.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\browser\\server.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\fixture.js","messages":[],"errorCount":0,"warn
ingCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\helpers\\testhelper.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\backbone.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\codemirror3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\jquery-1.7.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\json2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\lodash.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\libs\\prototype-17.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\npm.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\regression\\thirdparty.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\core.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\envs.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\asi.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":17,"column":20}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\blocks.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected end of input","line":32,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\boss.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\browser.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\camelcase.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\caseExpressions.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\class-declaration.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected reserved word","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\comma.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected identifier","line":15,"column":7}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\const.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token const","line":16,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\curly.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":2,"column":12}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\curly2.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return 
statement","line":2,"column":12}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\default-arguments.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token =","line":7,"column":28}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\destparam.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token =","line":4,"column":17}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\emptystmt.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ;","line":1,"column":5}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\enforceall.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\eqeqeq.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es5.funcexpr.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es5.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Object literal may not have data and accessor property with the same name","line":43,"column":19}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es5Reserved.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token default","line":6,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-export-star-from.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal export declaration","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-import-export.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal import declaration","line":3,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-template-literal-tagged.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":5,"column":18}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\es6-template-literal.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":3,"column":15}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\exported.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\forin.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\function-declaration.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal export 
declaration","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\functionScopedOptions.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-2194.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-226.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-334.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-738-browser.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh-738-node.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1227.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1632-1.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1632-2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1632-3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-1.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-4.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-5.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1768-6.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh1802.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh247.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh431.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh56.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh618.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh668.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh826.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
<","line":24,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh870.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh878.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gh988.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\gruntComment.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\identifiers.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\ignore-w117.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\ignored.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\ignoreDelimiters.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token <","line":3,"column":4}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\immed.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\insideEval.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\jslintInverted.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\jslintOptions.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\jslintRenamed.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\lastsemic.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedef-esnext.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token let","line":1,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedef-inline.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\latedefundef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\laxbreak.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\laxcomma.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\leak.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
const","line":3,"column":4}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\loopfunc.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\mappingstart.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-cyclomatic-complexity-per-function.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-nested-block-depth-per-function.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-parameters-per-function.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token =","line":7,"column":13}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\max-statements-per-function.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\maxlen.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\multiline-global-declarations.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nativeobject.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nbsp.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nestedFunctions-locations.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\nestedFunctions.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token [","line":37,"column":3}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\newcap.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\noarg.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\onevar.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\parsingCommas.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ,","line":2,"column":13}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\protoiterator.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes3.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\quotes4.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":2,"column":14}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\redef-es6.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
let","line":2,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\redef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\regex_array.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Illegal return statement","line":6,"column":8}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\removeglobals.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\reserved.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token let","line":5,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\return.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\safeasi.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token .","line":10,"column":9}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scope-cross-blocks.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scope-redef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scope.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\scripturl.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\shadow-inline.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\shelljs.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_incorrect.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_newcap.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_this.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_this2.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strict_violations.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\strings.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token ILLEGAL","line":9,"column":22}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\supernew.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\switchDefaultFirst.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\switchFallThrough.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token 
:","line":40,"column":13}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\trycatch.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\typeofcomp.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\undef_func.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\undef.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\undefstrict.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unignored.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unused-cross-blocks.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unused.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token const","line":34,"column":2}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\unusedglobals.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\with.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Strict mode code may not include a with statement","line":13,"column":6}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\fixtures\\yield-expressions.js","messages":[{"fatal":true,"severity":2,"message":"Parsing error: Unexpected token *","line":1,"column":10}],"errorCount":1,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\options.js","messages":[],"errorCount":0,"warningCount":0},{"filePath":"C:\\Users\\Guy\\Documents\\jshint\\tests\\unit\\parser.js","messages":[],"errorCount":0,"warningCount":0}]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:1:10: 'addOne' is defined but never used. [Error/no-unused-vars]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:2:9: Use the isNaN function to compare with NaN. [Error/use-isnan]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:3:16: Unexpected space before unary operator '++'. [Error/space-unary-ops]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:3:20: Missing semicolon. [Warning/semi]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:4:12: Unnecessary 'else' after 'return'. [Warning/no-else-return]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:5:1: Expected indentation of 8 spaces but found 6. [Warning/indent]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:5:7: Function 'addOne' expected a return value. [Error/consistent-return]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:5:13: Missing semicolon. [Warning/semi]
+/var/lib/jenkins/workspace/Releases/ESLint Release/eslint/fullOfProblems.js:7:2: Unnecessary semicolon. [Error/no-extra-semi]
|
switch eslint to a different formatter
the json formatter breaks on long text: https://github.com/eslint/eslint/issues/5380
```
b'Invalid string length\nRangeError: Invalid string length\n at JSON.stringify (<anonymous>)\n at module.exports (/home/travis/build/guykisel/inline-plz/node_modules/eslint/lib/formatters/json.js:12:17)\n at printResults (/home/travis/build/guykisel/inline-plz/node_modules/eslint/lib/cli.js:91:20)\n at Object.execute (/home/travis/build/guykisel/inline-plz/node_modules/eslint/lib/cli.js:201:17)\n at Object.<anonymous> (/home/travis/build/guykisel/inline-plz/node_modules/eslint/bin/eslint.js:74:28)\n at Module._compile (module.js:635:30)\n at Object.Module._extensions..js (module.js:646:10)\n at Module.load (module.js:554:32)\n at tryModuleLoad (module.js:497:12)\n at Function.Module._load (module.js:489:3)'
Parsing of eslint took 0 seconds
```
|
0.0
|
dc293c43edd1609683294660fb7c6a0840fb24ea
|
[
"tests/parsers/test_eslint.py::test_eslint"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-05-01 23:55:15+00:00
|
isc
| 2,683
|
|
intake__intake-439
|
diff --git a/intake/source/base.py b/intake/source/base.py
index f3abbbb..79fc62e 100644
--- a/intake/source/base.py
+++ b/intake/source/base.py
@@ -259,13 +259,26 @@ class DataSource(DictSerialiseMixin):
return self.plot
def persist(self, ttl=None, **kwargs):
- """Save data from this source to local persistent storage"""
+ """Save data from this source to local persistent storage
+
+ Parameters
+ ----------
+ ttl: numeric, optional
+ Time to live in seconds. If provided, the original source will
+ be accessed and a new persisted version written transparently
+ when more than ``ttl`` seconds have passed since the old persisted
+ version was written.
+ kargs: passed to the _persist method on the base container.
+ """
from ..container import container_map
from ..container.persist import PersistStore
import time
if 'original_tok' in self.metadata:
raise ValueError('Cannot persist a source taken from the persist '
'store')
+ if ttl is not None and not isinstance(ttl, (int, float)):
+ raise ValueError('Cannot persist using a time to live that is '
+ f'non-numeric. User-provided ttl was {ttl}')
method = container_map[self.container]._persist
store = PersistStore()
out = method(self, path=store.getdir(self), **kwargs)
|
intake/intake
|
aa83ccd41ed32f90f6bef5dbbe0e8a9e67c6d781
|
diff --git a/intake/container/tests/test_persist.py b/intake/container/tests/test_persist.py
index d914d25..96a831f 100644
--- a/intake/container/tests/test_persist.py
+++ b/intake/container/tests/test_persist.py
@@ -44,6 +44,12 @@ def test_backtrack(temp_cache):
assert s3 == s
+def test_persist_with_nonnumeric_ttl_raises_error(temp_cache):
+ s = TextFilesSource("*.py")
+ with pytest.raises(ValueError, match="User-provided ttl was a string"):
+ s.persist(ttl='a string')
+
+
class DummyDataframe(DataSource):
name = 'dummy'
container = 'dataframe'
|
persist ttl argument should check that it's an int.
I just tried to use persist for the first time and passed a file name to the `ttl` argument (don't ask me why, but I kind of thought `ttl` might be a terrible abbreviation of title). Passing a string to ttl did not throw an error, but when I try to call `is_persisted` on the source, I get a comparison failure.
As part of this, it might be nice to document the `ttl` behaviour in the persist docstring.
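For illustration, a minimal sketch of the early validation this asks for; the `persist` signature below only mirrors intake's, and the body is a hypothetical stub, not intake's implementation:
```python
def persist(source, ttl=None, **kwargs):
    """Save `source` to local persistent storage (illustrative stub).

    ttl : numeric, optional
        Time to live in seconds. A non-numeric value (e.g. a filename passed
        by mistake) should fail here, not later inside `is_persisted`
        comparisons.
    """
    if ttl is not None and not isinstance(ttl, (int, float)):
        raise ValueError(
            'ttl must be a number of seconds, got {!r}'.format(ttl))
    # ... the real persistence logic would follow here ...
```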
|
0.0
|
aa83ccd41ed32f90f6bef5dbbe0e8a9e67c6d781
|
[
"intake/container/tests/test_persist.py::test_persist_with_nonnumeric_ttl_raises_error"
] |
[
"intake/container/tests/test_persist.py::test_store",
"intake/container/tests/test_persist.py::test_backtrack"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-11-08 17:48:15+00:00
|
bsd-2-clause
| 2,839
|
|
faucetsdn__chewie-39
|
diff --git a/chewie/chewie.py b/chewie/chewie.py
index a046688..cfbb661 100644
--- a/chewie/chewie.py
+++ b/chewie/chewie.py
@@ -99,19 +99,19 @@ class Chewie:
def auth_failure(self, src_mac, port_id):
"""failure shim between faucet and chewie
- Args:
- src_mac (MacAddress): the mac of the failed supplicant
- port_id (MacAddress): the 'mac' identifier of what switch port
- the failure is on"""
+ Args:
+ src_mac (MacAddress): the mac of the failed supplicant
+ port_id (MacAddress): the 'mac' identifier of what switch port
+ the failure is on"""
if self.failure_handler:
self.failure_handler(src_mac, port_id)
def auth_logoff(self, src_mac, port_id):
"""logoff shim between faucet and chewie
- Args:
- src_mac (MacAddress): the mac of the logoff supplicant
- port_id (MacAddress): the 'mac' identifier of what switch port
- the logoff is on"""
+ Args:
+ src_mac (MacAddress): the mac of the logoff supplicant
+ port_id (MacAddress): the 'mac' identifier of what switch port
+ the logoff is on"""
if self.logoff_handler:
self.logoff_handler(src_mac, port_id)
@@ -144,7 +144,7 @@ class Chewie:
message, dst_mac = MessageParser.ethernet_parse(packed_message)
self.logger.info("eap EAP(): %s", message)
self.logger.info("Received message: %s" % message.__dict__)
- sm = self.get_state_machine(message.src_mac)
+ sm = self.get_state_machine(message.src_mac, dst_mac)
event = EventMessageReceived(message, dst_mac)
sm.event(event)
except Exception as e:
@@ -160,7 +160,7 @@ class Chewie:
try:
while True:
sleep(0)
- eap_message, src_mac, username, state = self.radius_output_messages.get()
+ eap_message, src_mac, username, state, port_id = self.radius_output_messages.get()
self.logger.info("got eap to send to radius.. mac: %s %s, username: %s",
type(src_mac), src_mac, username)
state_dict = None
@@ -169,7 +169,7 @@ class Chewie:
self.logger.info("Sending to RADIUS eap message %s with state %s",
eap_message.__dict__, state_dict)
radius_packet_id = self.get_next_radius_packet_id()
- self.packet_id_to_mac[radius_packet_id] = src_mac
+ self.packet_id_to_mac[radius_packet_id] = {'src_mac': src_mac, 'port_id': port_id}
# message is eap. needs to be wrapped into a radius packet.
request_authenticator = os.urandom(16)
self.packet_id_to_request_authenticator[radius_packet_id] = request_authenticator
@@ -258,17 +258,21 @@ class Chewie:
Returns:
FullEAPStateMachine
"""
- return self.get_state_machine(self.packet_id_to_mac[packet_id])
+ return self.get_state_machine(**self.packet_id_to_mac[packet_id])
- def get_state_machine(self, src_mac):
+ def get_state_machine(self, src_mac, port_id):
"""Gets or creates if it does not already exist an FullEAPStateMachine for the src_mac.
Args:
- src_mac (MACAddress): who's to get.
+ src_mac (MacAddress): who's to get.
+ port_id (MacAddress): ID of the port where the src_mac is.
Returns:
FullEAPStateMachine
"""
- sm = self.state_machines.get(src_mac, None)
+ port_sms = self.state_machines.get(str(port_id), None)
+ if port_sms is None:
+ self.state_machines[str(port_id)] = {}
+ sm = self.state_machines[str(port_id)].get(src_mac, None)
if not sm:
sm = FullEAPStateMachine(self.eap_output_messages, self.radius_output_messages, src_mac,
self.timer_scheduler, self.auth_success,
@@ -276,7 +280,7 @@ class Chewie:
sm.eapRestart = True
# TODO what if port is not actually enabled, but then how did they auth?
sm.portEnabled = True
- self.state_machines[src_mac] = sm
+ self.state_machines[str(port_id)][src_mac] = sm
return sm
def get_next_radius_packet_id(self):
diff --git a/chewie/eap_state_machine.py b/chewie/eap_state_machine.py
index aba5f5a..ac60dab 100644
--- a/chewie/eap_state_machine.py
+++ b/chewie/eap_state_machine.py
@@ -712,7 +712,8 @@ class FullEAPStateMachine:
if self.aaaEapRespData.code == Eap.RESPONSE:
self.radius_output_messages.put((self.aaaEapRespData, self.src_mac,
self.aaaIdentity.identity,
- self.radius_state_attribute))
+ self.radius_state_attribute,
+ self.port_id_mac))
self.sent_count += 1
self.set_timer()
self.aaaEapResp = False
|
faucetsdn/chewie
|
79e0c35e0089af91349fcd233d6419a25b28c25a
|
diff --git a/test/test_chewie.py b/test/test_chewie.py
new file mode 100644
index 0000000..931cdda
--- /dev/null
+++ b/test/test_chewie.py
@@ -0,0 +1,76 @@
+"""Unittests for chewie/chewie.py"""
+
+import logging
+import unittest
+
+from chewie.chewie import Chewie
+
+
+def auth_handler(chewie, client_mac, port_id_mac): # pylint: disable=unused-argument
+ """dummy handler for successful authentications"""
+ print('Successful auth from MAC %s on port: %s' % (str(client_mac), str(port_id_mac)))
+
+
+def failure_handler(chewie, client_mac, port_id_mac): # pylint: disable=unused-argument
+ """dummy handler for failed authentications"""
+ print('failure from MAC %s on port: %s' % (str(client_mac), str(port_id_mac)))
+
+
+def logoff_handler(chewie, client_mac, port_id_mac): # pylint: disable=unused-argument
+ """dummy handler for logoffs"""
+ print('logoff from MAC %s on port: %s' % (str(client_mac), str(port_id_mac)))
+
+
+class ChewieTestCase(unittest.TestCase):
+ """Main chewie.py test class"""
+
+ def setUp(self):
+ logger = logging.getLogger()
+
+ self.chewie = Chewie('lo', logger,
+ auth_handler, failure_handler, logoff_handler,
+ '127.0.0.1', 1812, 'SECRET',
+ '44:44:44:44:44:44')
+
+ def test_get_sm(self):
+ """Tests Chewie.get_state_machine()"""
+ self.assertEqual(len(self.chewie.state_machines), 0)
+ # creates the sm if it doesn't exist
+ sm = self.chewie.get_state_machine('12:34:56:78:9a:bc', # pylint: disable=invalid-name
+ '00:00:00:00:00:01')
+
+ self.assertEqual(len(self.chewie.state_machines), 1)
+
+ self.assertIs(sm, self.chewie.get_state_machine('12:34:56:78:9a:bc',
+ '00:00:00:00:00:01'))
+
+ self.assertIsNot(sm, self.chewie.get_state_machine('12:34:56:78:9a:bc',
+ '00:00:00:00:00:02'))
+ self.assertIsNot(sm, self.chewie.get_state_machine('ab:cd:ef:12:34:56',
+ '00:00:00:00:00:01'))
+
+ # 2 ports
+ self.assertEqual(len(self.chewie.state_machines), 2)
+ # port 1 has 2 macs
+ self.assertEqual(len(self.chewie.state_machines['00:00:00:00:00:01']), 2)
+ # port 2 has 1 mac
+ self.assertEqual(len(self.chewie.state_machines['00:00:00:00:00:02']), 1)
+
+ def test_get_sm_by_packet_id(self):
+ """Tests Chewie.get_sm_by_packet_id()"""
+ self.chewie.packet_id_to_mac[56] = {'src_mac': '12:34:56:78:9a:bc',
+ 'port_id': '00:00:00:00:00:01'}
+ sm = self.chewie.get_state_machine('12:34:56:78:9a:bc', # pylint: disable=invalid-name
+ '00:00:00:00:00:01')
+
+ self.assertIs(self.chewie.get_state_machine_from_radius_packet_id(56),
+ sm)
+ with self.assertRaises(KeyError):
+ self.chewie.get_state_machine_from_radius_packet_id(20)
+
+ def test_get_next_radius_packet_id(self):
+ """Tests Chewie.get_next_radius_packet_id()"""
+ for i in range(0, 260):
+ _i = i % 256
+ self.assertEqual(self.chewie.get_next_radius_packet_id(),
+ _i)
diff --git a/test/test_full_state_machine.py b/test/test_full_state_machine.py
index 713b8ae..18a6b23 100644
--- a/test/test_full_state_machine.py
+++ b/test/test_full_state_machine.py
@@ -161,9 +161,11 @@ class FullStateMachineStartTestCase(unittest.TestCase):
self.assertEqual(self.sm.currentState, self.sm.IDLE2)
self.assertEqual(self.eap_output_queue.qsize(), 1)
- self.assertIsInstance(self.eap_output_queue.get_nowait()[0], Md5ChallengeMessage)
+ output = self.eap_output_queue.get_nowait()[0]
+ self.assertIsInstance(output, Md5ChallengeMessage)
self.assertEqual(self.radius_output_queue.qsize(), 0)
+ return output
def test_md5_challenge_response(self):
self.test_md5_challenge_request()
@@ -224,9 +226,9 @@ class FullStateMachineStartTestCase(unittest.TestCase):
self.assertEqual(self.radius_output_queue.qsize(), 0)
def test_discard2(self):
- self.test_md5_challenge_request()
+ request = self.test_md5_challenge_request()
- message = Md5ChallengeMessage(self.src_mac, 222, Eap.RESPONSE,
+ message = Md5ChallengeMessage(self.src_mac, request.message_id + 10, Eap.RESPONSE,
build_byte_string("3a535f0ee8c6b34fe714aa7dad9a0e15"),
b"host1user")
self.sm.event(EventMessageReceived(message, None))
@@ -235,9 +237,9 @@ class FullStateMachineStartTestCase(unittest.TestCase):
self.assertEqual(self.radius_output_queue.qsize(), 0)
def test_discard(self):
- self.test_eap_start()
-
- message = IdentityMessage(self.src_mac, 40, Eap.RESPONSE, "host1user")
+ message = self.test_eap_start()
+ # Make a message that will be discarded (id here is not sequential)
+ message = IdentityMessage(self.src_mac, message.message_id + 10, Eap.RESPONSE, "host1user")
self.sm.event(EventMessageReceived(message, None))
self.assertEqual(self.sm.currentState, self.sm.IDLE)
|
Could verify that all EAP packets in a sequence come from the same port.
If we have two hosts with the same MAC (e.g. one good, one malicious) and both try to authenticate, the malicious port could end up authenticated when the good host succeeds, because the malicious host can send an EAP packet before the RADIUS Access-Accept has been received and thereby set sm.port_id_mac.
Basically we want to stop this. There are two obvious ways:
1. Make each state machine tied to a MAC and a port, so in the above case there would be two state machines.
2. Add some sort of verification logic.
Option 1 looks easier to implement (a rough sketch follows below).
It would also handle the case of two good hosts: both would authenticate.
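A rough sketch of option 1, keying state machines by port and then MAC; the class names here are placeholders, not chewie's actual code:
```python
class DummyStateMachine:
    """Stand-in for chewie's FullEAPStateMachine."""
    def __init__(self, src_mac, port_id):
        self.src_mac = src_mac
        self.port_id = port_id


class StateMachineStore:
    def __init__(self):
        self._machines = {}  # str(port_id) -> {src_mac: state machine}

    def get(self, src_mac, port_id):
        port_sms = self._machines.setdefault(str(port_id), {})
        if src_mac not in port_sms:
            port_sms[src_mac] = DummyStateMachine(src_mac, port_id)
        return port_sms[src_mac]


store = StateMachineStore()
good = store.get('12:34:56:78:9a:bc', '00:00:00:00:00:01')
bad = store.get('12:34:56:78:9a:bc', '00:00:00:00:00:02')
assert good is not bad  # same MAC, different port -> separate state machines
assert good is store.get('12:34:56:78:9a:bc', '00:00:00:00:00:01')
```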
|
0.0
|
79e0c35e0089af91349fcd233d6419a25b28c25a
|
[
"test/test_chewie.py::ChewieTestCase::test_get_sm",
"test/test_chewie.py::ChewieTestCase::test_get_sm_by_packet_id"
] |
[
"test/test_chewie.py::ChewieTestCase::test_get_next_radius_packet_id",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_disabled_state",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_discard",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_discard2",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_eap_restart",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_eap_start",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_failure2",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_identity_response",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_logoff2",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_logoff_from_idle2",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_md5_challenge_request",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_md5_challenge_response",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_success2",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_timeout_failure2_from_aaa_timeout",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_timeout_failure2_from_max_retransmits",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_timeout_failure_from_max_retransmits",
"test/test_full_state_machine.py::FullStateMachineStartTestCase::test_ttls_request"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-09-18 03:31:11+00:00
|
apache-2.0
| 2,317
|
|
Stewori__pytypes-98
|
diff --git a/pytypes/typechecker.py b/pytypes/typechecker.py
index a0cd92b..b6b4342 100644
--- a/pytypes/typechecker.py
+++ b/pytypes/typechecker.py
@@ -964,15 +964,19 @@ def typechecked_module(md, force_recursive = False):
"""
if not pytypes.checking_enabled:
return md
+ # Save input to return original string if input was a string.
+ md_arg = md
if isinstance(md, str):
if md in sys.modules:
md = sys.modules[md]
if md is None:
- return md
+ return md_arg
elif md in _pending_modules:
# if import is pending, we just store this call for later
_pending_modules[md].append(lambda t: typechecked_module(t, True))
- return md
+ return md_arg
+ else:
+ raise KeyError('Found no module {!r} to typecheck'.format(md))
assert(ismodule(md))
if md.__name__ in _pending_modules:
# if import is pending, we just store this call for later
@@ -981,7 +985,7 @@ def typechecked_module(md, force_recursive = False):
# todo: Issue warning here that not the whole module might be covered yet
if md.__name__ in _fully_typechecked_modules and \
_fully_typechecked_modules[md.__name__] == len(md.__dict__):
- return md
+ return md_arg
# To play it safe we avoid to modify the dict while iterating over it,
# so we previously cache keys.
# For this we don't use keys() because of Python 3.
@@ -997,7 +1001,7 @@ def typechecked_module(md, force_recursive = False):
typechecked_class(memb, force_recursive, force_recursive)
if not md.__name__ in _pending_modules:
_fully_typechecked_modules[md.__name__] = len(md.__dict__)
- return md
+ return md_arg
def typechecked(memb):
|
Stewori/pytypes
|
befcedde8cea9b189c643f905c2b7ad180f27f8e
|
diff --git a/tests/test_typechecker.py b/tests/test_typechecker.py
index fc4483d..60546b5 100644
--- a/tests/test_typechecker.py
+++ b/tests/test_typechecker.py
@@ -2651,8 +2651,14 @@ class TestTypecheck_module(unittest.TestCase):
def test_function_py2(self):
from testhelpers import modulewide_typecheck_testhelper_py2 as mth
self.assertEqual(mth.testfunc(3, 2.5, 'abcd'), (9, 7.5))
+ with self.assertRaises(KeyError):
+ pytypes.typechecked_module('nonexistent123')
self.assertEqual(mth.testfunc(3, 2.5, 7), (9, 7.5)) # would normally fail
- pytypes.typechecked_module(mth)
+ module_name = 'testhelpers.modulewide_typecheck_testhelper_py2'
+ returned_mth = pytypes.typechecked_module(module_name)
+ self.assertEqual(returned_mth, module_name)
+ returned_mth = pytypes.typechecked_module(mth)
+ self.assertEqual(returned_mth, mth)
self.assertEqual(mth.testfunc(3, 2.5, 'abcd'), (9, 7.5))
self.assertRaises(InputTypeError, lambda: mth.testfunc(3, 2.5, 7))
@@ -2662,7 +2668,8 @@ class TestTypecheck_module(unittest.TestCase):
from testhelpers import modulewide_typecheck_testhelper as mth
self.assertEqual(mth.testfunc(3, 2.5, 'abcd'), (9, 7.5))
self.assertEqual(mth.testfunc(3, 2.5, 7), (9, 7.5)) # would normally fail
- pytypes.typechecked_module(mth)
+ returned_mth = pytypes.typechecked_module(mth)
+ self.assertEqual(returned_mth, mth)
self.assertEqual(mth.testfunc(3, 2.5, 'abcd'), (9, 7.5))
self.assertRaises(InputTypeError, lambda: mth.testfunc(3, 2.5, 7))
|
typechecked(string) should have more consistent return type
Currently, if you call `typechecked` on a string module name, the behavior is case-by-case: it sometimes returns the original string and sometimes returns the resolved module:
```python
typechecked('requests') → <module 'requests'>
typechecked('requests') → 'requests'
```
Could we simplify this so that it unconditionally returns the value that was passed in: str→str, module→module, class→class, etc.?
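For illustration, the requested contract amounts to remembering the original argument and returning it unchanged, whatever was resolved internally (a sketch, not pytypes' actual implementation):
```python
import sys

def typechecked_module_sketch(md):
    md_arg = md  # remember the caller's original value
    if isinstance(md, str):
        if md not in sys.modules:
            raise KeyError('Found no module {!r} to typecheck'.format(md))
        md = sys.modules[md]  # resolved module used internally only
    # ... instrumentation of md's functions/classes would happen here ...
    return md_arg  # str in -> str out, module in -> module out

assert typechecked_module_sketch('sys') == 'sys'
assert typechecked_module_sketch(sys) is sys
```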
|
0.0
|
befcedde8cea9b189c643f905c2b7ad180f27f8e
|
[
"tests/test_typechecker.py::TestTypecheck_module::test_function_py2"
] |
[
"tests/test_typechecker.py::testClass2_defTimeCheck",
"tests/test_typechecker.py::testClass2_defTimeCheck2",
"tests/test_typechecker.py::testClass2_defTimeCheck3",
"tests/test_typechecker.py::testClass2_defTimeCheck4",
"tests/test_typechecker.py::testClass3_defTimeCheck",
"tests/test_typechecker.py::testClass2_defTimeCheck_init_ov",
"tests/test_typechecker.py::testfunc_check_argument_types_empty",
"tests/test_typechecker.py::testfunc_varargs1",
"tests/test_typechecker.py::testfunc_varargs4",
"tests/test_typechecker.py::testfunc_varargs_ca1",
"tests/test_typechecker.py::testfunc_varargs_ca4",
"tests/test_typechecker.py::TestTypecheck::test_abstract_override",
"tests/test_typechecker.py::TestTypecheck::test_annotations_from_typestring",
"tests/test_typechecker.py::TestTypecheck::test_callable",
"tests/test_typechecker.py::TestTypecheck::test_classmethod",
"tests/test_typechecker.py::TestTypecheck::test_custom_annotations",
"tests/test_typechecker.py::TestTypecheck::test_custom_generic",
"tests/test_typechecker.py::TestTypecheck::test_defaults_inferred_types",
"tests/test_typechecker.py::TestTypecheck::test_function",
"tests/test_typechecker.py::TestTypecheck::test_get_types",
"tests/test_typechecker.py::TestTypecheck::test_method",
"tests/test_typechecker.py::TestTypecheck::test_method_forward",
"tests/test_typechecker.py::TestTypecheck::test_parent_typecheck_no_override",
"tests/test_typechecker.py::TestTypecheck::test_property",
"tests/test_typechecker.py::TestTypecheck::test_staticmethod",
"tests/test_typechecker.py::TestTypecheck::test_typecheck_parent_type",
"tests/test_typechecker.py::TestTypecheck::test_typestring_varargs_syntax",
"tests/test_typechecker.py::TestTypecheck::test_typevar_class",
"tests/test_typechecker.py::TestTypecheck::test_typevar_collision",
"tests/test_typechecker.py::TestTypecheck::test_various",
"tests/test_typechecker.py::TestTypecheck_class::test_classmethod",
"tests/test_typechecker.py::TestTypecheck_class::test_method",
"tests/test_typechecker.py::TestTypecheck_class::test_staticmethod",
"tests/test_typechecker.py::TestTypecheck_module::test_function_py3",
"tests/test_typechecker.py::Test_check_argument_types::test_function",
"tests/test_typechecker.py::Test_check_argument_types::test_inner_class",
"tests/test_typechecker.py::Test_check_argument_types::test_inner_method",
"tests/test_typechecker.py::Test_check_argument_types::test_methods",
"tests/test_typechecker.py::TestOverride::test_auto_override",
"tests/test_typechecker.py::TestOverride::test_override",
"tests/test_typechecker.py::TestOverride::test_override_at_definition_time",
"tests/test_typechecker.py::TestOverride::test_override_at_definition_time_with_forward_decl",
"tests/test_typechecker.py::TestOverride::test_override_diamond",
"tests/test_typechecker.py::TestOverride::test_override_typecheck",
"tests/test_typechecker.py::TestOverride::test_override_typecheck_class",
"tests/test_typechecker.py::TestOverride::test_override_vararg",
"tests/test_typechecker.py::TestStubfile::test_annotations_from_stubfile_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_annotations_from_stubfile_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_callable_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_custom_generic_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_defaults_inferred_types_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_defaults_inferred_types_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_override_diamond_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_override_diamond_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_property_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_property_plain_3_5_stub",
"tests/test_typechecker.py::TestStubfile::test_typecheck_parent_type_plain_2_7_stub",
"tests/test_typechecker.py::TestStubfile::test_typecheck_parent_type_plain_3_5_stub",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_abstract_override_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_callable_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_classmethod_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_custom_generic_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_inferred_types",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_with_missing_annotations_class",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_with_missing_annotations_plain",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_with_missing_annotations_property",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_defaults_with_missing_annotations_static",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_function_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_get_types_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_method_forward_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_method_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_parent_typecheck_no_override_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_property",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_staticmethod_py3",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_typecheck_parent_type",
"tests/test_typechecker.py::TestTypecheck_Python3_5::test_various_py3",
"tests/test_typechecker.py::TestOverride_Python3_5::test_auto_override",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_at_definition_time",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_at_definition_time_with_forward_decl",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_diamond",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_py3",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_typecheck",
"tests/test_typechecker.py::TestOverride_Python3_5::test_override_vararg",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_function",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_inner_class",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_inner_method",
"tests/test_typechecker.py::Test_check_argument_types_Python3_5::test_methods",
"tests/test_typechecker.py::Test_utils::test_Generator_is_of_type",
"tests/test_typechecker.py::Test_utils::test_bound_typevars_readonly",
"tests/test_typechecker.py::Test_utils::test_empty_values",
"tests/test_typechecker.py::Test_utils::test_forward_declaration_infinite_recursion",
"tests/test_typechecker.py::Test_utils::test_has_type_hints_on_slot_wrapper",
"tests/test_typechecker.py::Test_utils::test_resolve_fw_decl",
"tests/test_typechecker.py::Test_utils::test_tuple_ellipsis",
"tests/test_typechecker.py::Test_utils::test_tuple_ellipsis_check",
"tests/test_typechecker.py::Test_utils::test_type_bases",
"tests/test_typechecker.py::Test_combine_argtype::test_exceptions",
"tests/test_typechecker.py::Test_combine_argtype::test_function",
"tests/test_typechecker.py::Test_agent::test_function_agent",
"tests/test_typechecker.py::Test_agent::test_init_agent_return_None",
"tests/test_typechecker.py::Test_agent::test_method_agent_return",
"tests/test_typechecker.py::Test_agent_Python3_5::test_function_agent",
"tests/test_typechecker.py::Test_agent_Python3_5::test_init_agent_return_None",
"tests/test_typechecker.py::Test_agent_Python3_5::test_method_agent_return"
] |
{
"failed_lite_validators": [
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-06 06:08:27+00:00
|
apache-2.0
| 721
|
|
fepegar__torchio-267
|
diff --git a/torchio/data/subject.py b/torchio/data/subject.py
index 0430423..76445e9 100644
--- a/torchio/data/subject.py
+++ b/torchio/data/subject.py
@@ -45,18 +45,15 @@ class Subject(dict):
'Only one dictionary as positional argument is allowed')
raise ValueError(message)
super().__init__(**kwargs)
- self.images = [
- (k, v) for (k, v) in self.items()
- if isinstance(v, Image)
- ]
- self._parse_images(self.images)
+ self._parse_images(self.get_images(intensity_only=False))
self.update_attributes() # this allows me to do e.g. subject.t1
self.history = []
def __repr__(self):
+ num_images = len(self.get_images(intensity_only=False))
string = (
f'{self.__class__.__name__}'
- f'(Keys: {tuple(self.keys())}; images: {len(self.images)})'
+ f'(Keys: {tuple(self.keys())}; images: {num_images})'
)
return string
@@ -84,27 +81,46 @@ class Subject(dict):
Consistency of shapes across images in the subject is checked first.
"""
- self.check_consistent_shape()
- image = self.get_images(intensity_only=False)[0]
- return image.shape
+ self.check_consistent_attribute('shape')
+ return self.get_first_image().shape
@property
def spatial_shape(self):
"""Return spatial shape of first image in subject.
- Consistency of shapes across images in the subject is checked first.
+ Consistency of spatial shapes across images in the subject is checked
+ first.
"""
- return self.shape[1:]
+ self.check_consistent_spatial_shape()
+ return self.get_first_image().spatial_shape
@property
def spacing(self):
"""Return spacing of first image in subject.
- Consistency of shapes across images in the subject is checked first.
+ Consistency of spacings across images in the subject is checked first.
"""
- self.check_consistent_shape()
- image = self.get_images(intensity_only=False)[0]
- return image.spacing
+ self.check_consistent_attribute('spacing')
+ return self.get_first_image().spacing
+
+ def check_consistent_attribute(self, attribute: str) -> None:
+ values_dict = {}
+ iterable = self.get_images_dict(intensity_only=False).items()
+ for image_name, image in iterable:
+ values_dict[image_name] = getattr(image, attribute)
+ num_unique_values = len(set(values_dict.values()))
+ if num_unique_values > 1:
+ message = (
+ f'More than one {attribute} found in subject images:'
+ f'\n{pprint.pformat(values_dict)}'
+ )
+ raise RuntimeError(message)
+
+ def check_consistent_shape(self) -> None:
+ self.check_consistent_attribute('shape')
+
+ def check_consistent_spatial_shape(self) -> None:
+ self.check_consistent_attribute('spatial_shape')
def get_images_dict(self, intensity_only=True):
images = {}
@@ -123,32 +139,6 @@ class Subject(dict):
def get_first_image(self):
return self.get_images(intensity_only=False)[0]
- def check_consistent_shape(self) -> None:
- shapes_dict = {}
- iterable = self.get_images_dict(intensity_only=False).items()
- for image_name, image in iterable:
- shapes_dict[image_name] = image.shape
- num_unique_shapes = len(set(shapes_dict.values()))
- if num_unique_shapes > 1:
- message = (
- 'Images in subject have inconsistent shapes:'
- f'\n{pprint.pformat(shapes_dict)}'
- )
- raise ValueError(message)
-
- def check_consistent_orientation(self) -> None:
- orientations_dict = {}
- iterable = self.get_images_dict(intensity_only=False).items()
- for image_name, image in iterable:
- orientations_dict[image_name] = image.orientation
- num_unique_orientations = len(set(orientations_dict.values()))
- if num_unique_orientations > 1:
- message = (
- 'Images in subject have inconsistent orientations:'
- f'\n{pprint.pformat(orientations_dict)}'
- )
- raise ValueError(message)
-
def add_transform(
self,
transform: 'Transform',
@@ -177,6 +167,9 @@ class Subject(dict):
# This allows to get images using attribute notation, e.g. subject.t1
self.__dict__.update(self)
- def add_image(self, image, image_name):
+ def add_image(self, image: Image, image_name: str) -> None:
self[image_name] = image
self.update_attributes()
+
+ def remove_image(self, image_name: str) -> None:
+ del self[image_name]
diff --git a/torchio/transforms/augmentation/spatial/random_affine.py b/torchio/transforms/augmentation/spatial/random_affine.py
index 1ace679..d561b2d 100644
--- a/torchio/transforms/augmentation/spatial/random_affine.py
+++ b/torchio/transforms/augmentation/spatial/random_affine.py
@@ -157,7 +157,7 @@ class RandomAffine(RandomTransform, SpatialTransform):
return transform
def apply_transform(self, sample: Subject) -> dict:
- sample.check_consistent_shape()
+ sample.check_consistent_spatial_shape()
params = self.get_params(
self.scales,
self.degrees,
diff --git a/torchio/transforms/augmentation/spatial/random_elastic_deformation.py b/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
index 654bf15..c6ea4a9 100644
--- a/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
+++ b/torchio/transforms/augmentation/spatial/random_elastic_deformation.py
@@ -215,7 +215,7 @@ class RandomElasticDeformation(RandomTransform, SpatialTransform):
warnings.warn(message, RuntimeWarning)
def apply_transform(self, sample: Subject) -> dict:
- sample.check_consistent_shape()
+ sample.check_consistent_spatial_shape()
bspline_params = self.get_params(
self.num_control_points,
self.max_displacement,
diff --git a/torchio/transforms/preprocessing/spatial/crop_or_pad.py b/torchio/transforms/preprocessing/spatial/crop_or_pad.py
index eadaed9..a40c343 100644
--- a/torchio/transforms/preprocessing/spatial/crop_or_pad.py
+++ b/torchio/transforms/preprocessing/spatial/crop_or_pad.py
@@ -99,7 +99,7 @@ class CropOrPad(BoundsTransform):
@staticmethod
def _get_sample_shape(sample: Subject) -> TypeTripletInt:
"""Return the shape of the first image in the sample."""
- sample.check_consistent_shape()
+ sample.check_consistent_spatial_shape()
for image_dict in sample.get_images(intensity_only=False):
data = image_dict.spatial_shape # remove channels dimension
break
|
fepegar/torchio
|
10f454b12581f8ea9ce03204dccab368e273086f
|
diff --git a/tests/data/test_subject.py b/tests/data/test_subject.py
index 164036b..72adad4 100644
--- a/tests/data/test_subject.py
+++ b/tests/data/test_subject.py
@@ -3,6 +3,7 @@
"""Tests for Subject."""
import tempfile
+import torch
from torchio import Subject, ScalarImage, RandomFlip
from ..utils import TorchioTestCase
@@ -30,3 +31,20 @@ class TestSubject(TorchioTestCase):
def test_history(self):
transformed = RandomFlip()(self.sample)
self.assertIs(len(transformed.history), 1)
+
+ def test_inconsistent_shape(self):
+ subject = Subject(
+ a=ScalarImage(tensor=torch.rand(1, 2, 3, 4)),
+ b=ScalarImage(tensor=torch.rand(2, 2, 3, 4)),
+ )
+ subject.spatial_shape
+ with self.assertRaises(RuntimeError):
+ subject.shape
+
+ def test_inconsistent_spatial_shape(self):
+ subject = Subject(
+ a=ScalarImage(tensor=torch.rand(1, 3, 3, 4)),
+ b=ScalarImage(tensor=torch.rand(2, 2, 3, 4)),
+ )
+ with self.assertRaises(RuntimeError):
+ subject.spatial_shape
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 6dfb126..8a062f3 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -43,13 +43,6 @@ class TestUtils(TorchioTestCase):
assert isinstance(guess_type('[1,3,5]'), list)
assert isinstance(guess_type('test'), str)
- def test_check_consistent_shape(self):
- good_sample = self.sample
- bad_sample = self.get_inconsistent_sample()
- good_sample.check_consistent_shape()
- with self.assertRaises(ValueError):
- bad_sample.check_consistent_shape()
-
def test_apply_transform_to_file(self):
transform = RandomFlip()
apply_transform_to_file(
diff --git a/tests/transforms/test_transforms.py b/tests/transforms/test_transforms.py
index a5c15ad..9a62306 100644
--- a/tests/transforms/test_transforms.py
+++ b/tests/transforms/test_transforms.py
@@ -51,6 +51,24 @@ class TestTransforms(TorchioTestCase):
transforms.append(torchio.RandomLabelsToImage(label_key='label'))
return torchio.Compose(transforms)
+ def test_transforms_dict(self):
+ transform = torchio.RandomNoise(keys=('t1', 't2'))
+ input_dict = {k: v.data for (k, v) in self.sample.items()}
+ transformed = transform(input_dict)
+ self.assertIsInstance(transformed, dict)
+
+ def test_transforms_dict_no_keys(self):
+ transform = torchio.RandomNoise()
+ input_dict = {k: v.data for (k, v) in self.sample.items()}
+ with self.assertRaises(RuntimeError):
+ transform(input_dict)
+
+ def test_transforms_image(self):
+ transform = self.get_transform(
+ channels=('default_image_name',), labels=False)
+ transformed = transform(self.sample.t1)
+ self.assertIsInstance(transformed, torchio.ScalarImage)
+
def test_transforms_tensor(self):
tensor = torch.rand(2, 4, 5, 8)
transform = self.get_transform(
@@ -136,3 +154,9 @@ class TestTransforms(TorchioTestCase):
original_data,
f'Changes after {transform.name}'
)
+
+
+class TestTransform(TorchioTestCase):
+ def test_abstract_transform(self):
+ with self.assertRaises(TypeError):
+ transform = torchio.Transform()
|
ValueError in Subject.spatial_shape
**🐛Bug**
An error is raised if images have different numbers of channels, even when the spatial shape of all images is the same. This happens because the spatial shape is computed from Subject.shape. It used to work until we added support for 4D images.
**To reproduce**
```python
import torchio as tio
icbm = tio.datasets.ICBM2009CNonlinearSymmetryc()
del icbm['face'] # this one does have a different spatial shape
icbm.spatial_shape
```
```python-traceback
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-2-6b7dc2edb3cc> in <module>
----> 1 icbm.spatial_shape
~/git/torchio/torchio/data/subject.py in spatial_shape(self)
95 Consistency of shapes across images in the subject is checked first.
96 """
---> 97 return self.shape[1:]
98
99 @property
~/git/torchio/torchio/data/subject.py in shape(self)
85 Consistency of shapes across images in the subject is checked first.
86 """
---> 87 self.check_consistent_shape()
88 image = self.get_images(intensity_only=False)[0]
89 return image.shape
~/git/torchio/torchio/data/subject.py in check_consistent_shape(self)
135 f'\n{pprint.pformat(shapes_dict)}'
136 )
--> 137 raise ValueError(message)
138
139 def check_consistent_orientation(self) -> None:
ValueError: Images in subject have inconsistent shapes:
{'brain': (1, 193, 229, 193),
'eyes': (1, 193, 229, 193),
'pd': (1, 193, 229, 193),
't1': (1, 193, 229, 193),
't2': (1, 193, 229, 193),
'tissues': (3, 193, 229, 193)}
```
**Expected behavior**
It should check only the spatial shapes of the images.
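For illustration, a check restricted to the spatial part of each shape accepts the example above; this sketch assumes shapes of the form (channels, x, y, z) and is not torchio's actual code:
```python
import pprint

def check_consistent_spatial_shape(shapes_by_name):
    # Compare only the spatial part (x, y, z); channel counts may differ.
    spatial = {name: shape[1:] for name, shape in shapes_by_name.items()}
    if len(set(spatial.values())) > 1:
        raise RuntimeError(
            'Images have inconsistent spatial shapes:\n'
            + pprint.pformat(spatial))

# Different channel counts, same spatial shape: should pass without raising.
check_consistent_spatial_shape({
    't1': (1, 193, 229, 193),
    'tissues': (3, 193, 229, 193),
})
```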
**TorchIO version**
`0.17.26`.
|
0.0
|
10f454b12581f8ea9ce03204dccab368e273086f
|
[
"tests/data/test_subject.py::TestSubject::test_inconsistent_shape",
"tests/data/test_subject.py::TestSubject::test_inconsistent_spatial_shape"
] |
[
"tests/data/test_subject.py::TestSubject::test_history",
"tests/data/test_subject.py::TestSubject::test_input_dict",
"tests/data/test_subject.py::TestSubject::test_no_sample",
"tests/data/test_subject.py::TestSubject::test_positional_args",
"tests/test_utils.py::TestUtils::test_apply_transform_to_file",
"tests/test_utils.py::TestUtils::test_get_stem",
"tests/test_utils.py::TestUtils::test_guess_type",
"tests/test_utils.py::TestUtils::test_sitk_to_nib",
"tests/test_utils.py::TestUtils::test_to_tuple",
"tests/test_utils.py::TestNibabelToSimpleITK::test_2d_3d_multi",
"tests/test_utils.py::TestNibabelToSimpleITK::test_2d_3d_single",
"tests/test_utils.py::TestNibabelToSimpleITK::test_2d_multi",
"tests/test_utils.py::TestNibabelToSimpleITK::test_2d_single",
"tests/test_utils.py::TestNibabelToSimpleITK::test_3d_multi",
"tests/test_utils.py::TestNibabelToSimpleITK::test_3d_single",
"tests/test_utils.py::TestNibabelToSimpleITK::test_wrong_dims",
"tests/transforms/test_transforms.py::TestTransforms::test_transform_noop",
"tests/transforms/test_transforms.py::TestTransforms::test_transforms_dict",
"tests/transforms/test_transforms.py::TestTransforms::test_transforms_dict_no_keys",
"tests/transforms/test_transforms.py::TestTransform::test_abstract_transform"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-12 17:30:18+00:00
|
mit
| 2,333
|
|
huggingface__transformers-30192
|
diff --git a/docs/source/en/perf_infer_gpu_one.md b/docs/source/en/perf_infer_gpu_one.md
index 64583e4ba..de49d4427 100644
--- a/docs/source/en/perf_infer_gpu_one.md
+++ b/docs/source/en/perf_infer_gpu_one.md
@@ -194,6 +194,7 @@ For now, Transformers supports SDPA inference and training for the following arc
* [Bert](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertModel)
* [Cohere](https://huggingface.co/docs/transformers/model_doc/cohere#transformers.CohereModel)
* [Dbrx](https://huggingface.co/docs/transformers/model_doc/dbrx#transformers.DbrxModel)
+* [Dpr](https://huggingface.co/docs/transformers/model_doc/dpr#transformers.DprReader)
* [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel)
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
* [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel)
diff --git a/examples/pytorch/speech-recognition/README.md b/examples/pytorch/speech-recognition/README.md
index b9cab9513..4990219f4 100644
--- a/examples/pytorch/speech-recognition/README.md
+++ b/examples/pytorch/speech-recognition/README.md
@@ -368,6 +368,7 @@ python run_speech_recognition_seq2seq.py \
--dataset_name="mozilla-foundation/common_voice_11_0" \
--dataset_config_name="hi" \
--language="hindi" \
+ --task="transcribe" \
--train_split_name="train+validation" \
--eval_split_name="test" \
--max_steps="5000" \
@@ -384,12 +385,10 @@ python run_speech_recognition_seq2seq.py \
--save_steps="1000" \
--generation_max_length="225" \
--preprocessing_num_workers="16" \
- --length_column_name="input_length" \
--max_duration_in_seconds="30" \
--text_column_name="sentence" \
--freeze_feature_encoder="False" \
--gradient_checkpointing \
- --group_by_length \
--fp16 \
--overwrite_output_dir \
--do_train \
@@ -399,7 +398,8 @@ python run_speech_recognition_seq2seq.py \
```
On a single V100, training should take approximately 8 hours, with a final cross-entropy loss of **1e-4** and word error rate of **32.6%**.
-If training on a different language, you should be sure to change the `language` argument. The `language` argument should be omitted for English speech recognition.
+If training on a different language, you should be sure to change the `language` argument. The `language` and `task`
+arguments should be omitted for English speech recognition.
#### Multi GPU Whisper Training
The following example shows how to fine-tune the [Whisper small](https://huggingface.co/openai/whisper-small) checkpoint on the Hindi subset of [Common Voice 11](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) using 2 GPU devices in half-precision:
@@ -410,6 +410,7 @@ torchrun \
--dataset_name="mozilla-foundation/common_voice_11_0" \
--dataset_config_name="hi" \
--language="hindi" \
+ --task="transcribe" \
--train_split_name="train+validation" \
--eval_split_name="test" \
--max_steps="5000" \
@@ -425,12 +426,10 @@ torchrun \
--save_steps="1000" \
--generation_max_length="225" \
--preprocessing_num_workers="16" \
- --length_column_name="input_length" \
--max_duration_in_seconds="30" \
--text_column_name="sentence" \
--freeze_feature_encoder="False" \
--gradient_checkpointing \
- --group_by_length \
--fp16 \
--overwrite_output_dir \
--do_train \
diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
index 3a596e2cb..f352954d8 100755
--- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
+++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py
@@ -119,17 +119,16 @@ class ModelArguments:
)
forced_decoder_ids: List[List[int]] = field(
default=None,
- metadata={
+ metadata={"help": "Deprecated. Please use the `language` and `task` arguments instead."},
+ )
+ suppress_tokens: List[int] = field(
+ default=None, metadata={
"help": (
- "A list of pairs of integers which indicates a mapping from generation indices to token indices "
- "that will be forced before sampling. For example, [[0, 123]] means the first generated token "
- "will always be a token of index 123."
+ "Deprecated. The use of `suppress_tokens` should not be required for the majority of fine-tuning examples."
+ "Should you need to use `suppress_tokens`, please manually update them in the fine-tuning script directly."
)
},
)
- suppress_tokens: List[int] = field(
- default=None, metadata={"help": "A list of tokens that will be suppressed at generation."}
- )
apply_spec_augment: bool = field(
default=False,
metadata={
@@ -400,8 +399,6 @@ def main():
trust_remote_code=model_args.trust_remote_code,
)
- config.update({"forced_decoder_ids": model_args.forced_decoder_ids, "suppress_tokens": model_args.suppress_tokens})
-
# SpecAugment for whisper models
if getattr(config, "model_type", None) == "whisper":
config.update({"apply_spec_augment": model_args.apply_spec_augment})
@@ -440,9 +437,35 @@ def main():
model.freeze_encoder()
model.model.encoder.gradient_checkpointing = False
- if data_args.language is not None:
- # We only need to set the task id when the language is specified (i.e. in a multilingual setting)
+ if hasattr(model.generation_config, "is_multilingual") and model.generation_config.is_multilingual:
+ # We only need to set the language and task ids in a multilingual setting
tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task)
+ model.generation_config.update(
+ **{
+ "language": data_args.language,
+ "task": data_args.task,
+ }
+ )
+ elif data_args.language is not None:
+ raise ValueError(
+ "Setting language token for an English-only checkpoint is not permitted. The language argument should "
+ "only be set for multilingual checkpoints."
+ )
+
+ # TODO (Sanchit): deprecate these arguments in v4.41
+ if model_args.forced_decoder_ids is not None:
+ logger.warning(
+ "The use of `forced_decoder_ids` is deprecated and will be removed in v4.41."
+ "Please use the `language` and `task` arguments instead"
+ )
+ model.generation_config.forced_decoder_ids = model_args.forced_decoder_ids
+
+ if model_args.suppress_tokens is not None:
+ logger.warning(
+ "The use of `suppress_tokens` is deprecated and will be removed in v4.41."
+ "Should you need `suppress_tokens`, please manually set them in the fine-tuning script."
+ )
+ model.generation_config.suppress_tokens = model_args.suppress_tokens
# 6. Resample speech dataset if necessary
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py
index e4a55b345..7d71fc982 100644
--- a/src/transformers/image_utils.py
+++ b/src/transformers/image_utils.py
@@ -320,7 +320,7 @@ def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] =
# Try to load as base64
try:
- b64 = base64.b64decode(image, validate=True)
+ b64 = base64.decodebytes(image.encode())
image = PIL.Image.open(BytesIO(b64))
except Exception as e:
raise ValueError(
diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py
index 0a45ec752..928f2b931 100644
--- a/src/transformers/models/dpr/modeling_dpr.py
+++ b/src/transformers/models/dpr/modeling_dpr.py
@@ -142,6 +142,8 @@ class DPRReaderOutput(ModelOutput):
class DPRPreTrainedModel(PreTrainedModel):
+ _supports_sdpa = True
+
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
diff --git a/src/transformers/models/grounding_dino/modeling_grounding_dino.py b/src/transformers/models/grounding_dino/modeling_grounding_dino.py
index 83009c925..da8dd29a5 100644
--- a/src/transformers/models/grounding_dino/modeling_grounding_dino.py
+++ b/src/transformers/models/grounding_dino/modeling_grounding_dino.py
@@ -2113,7 +2113,9 @@ class GroundingDinoModel(GroundingDinoPreTrainedModel):
)
# Create text backbone
- self.text_backbone = AutoModel.from_config(config.text_config, add_pooling_layer=False)
+ self.text_backbone = AutoModel.from_config(
+ config.text_config, add_pooling_layer=False, attn_implementation=config._attn_implementation
+ )
self.text_projection = nn.Linear(config.text_config.hidden_size, config.d_model)
if config.embedding_init_target or not config.two_stage:
|
huggingface/transformers
|
aafa7ce72b65c730788c122a72a974e464409e9a
|
diff --git a/tests/utils/test_image_utils.py b/tests/utils/test_image_utils.py
index d6bc9a375..f360c4bb8 100644
--- a/tests/utils/test_image_utils.py
+++ b/tests/utils/test_image_utils.py
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import codecs
import os
import tempfile
import unittest
@@ -544,6 +545,23 @@ class LoadImageTester(unittest.TestCase):
self.assertEqual(img_arr.shape, (64, 32, 3))
+ def test_load_img_base64_encoded_bytes(self):
+ try:
+ tmp_file = tempfile.mktemp()
+ with open(tmp_file, "wb") as f:
+ http_get(
+ "https://huggingface.co/datasets/hf-internal-testing/dummy-base64-images/raw/main/image_2.txt", f
+ )
+
+ with codecs.open(tmp_file, encoding="unicode_escape") as b64:
+ img = load_image(b64.read())
+ img_arr = np.array(img)
+
+ finally:
+ os.remove(tmp_file)
+
+ self.assertEqual(img_arr.shape, (256, 256, 3))
+
def test_load_img_rgba(self):
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
|
Image classification pipeline fails with base64.encodebytes() input
### System Info
transformers - 4.39.1
python 3.10.11
platform - ubuntu 22.04
### Who can help?
_No response_
### Information
- [ ] The official example scripts
- [X] My own modified scripts
### Tasks
- [ ] An officially supported task in the `examples` folder (such as GLUE/SQuAD, ...)
- [ ] My own task or dataset (give details below)
### Reproduction
``` python
from PIL import Image
import base64
from io import BytesIO
import torch
import transformers
from transformers import AutoModelForImageClassification, AutoFeatureExtractor
model = AutoModelForImageClassification.from_pretrained("microsoft/resnet-18")
tokenizer = AutoFeatureExtractor.from_pretrained("microsoft/resnet-18")
tr_model = {"model": model, "image_processor": tokenizer}
vision_model = transformers.pipeline(
task="image-classification", model=model, image_processor=tokenizer
)
buffered = BytesIO()
im = torch.rand((256, 256,3))
image = Image.fromarray(im.numpy().astype('uint8'), 'RGB')
image.save(buffered, format="JPEG")
img_str1 = base64.b64encode(buffered.getvalue()).decode("utf-8")
pred1 = vision_model.predict(img_str1)
img_str2 = base64.encodebytes(buffered.getvalue()).decode("utf-8")
pred2 = vision_model.predict(img_str2)
```
results in this error:
Traceback (most recent call last):
File "/home/azureuser/workspace/AzureMlCli/test_oss.py", line 22, in <module>
pred2 = vision_model.predict(img_str2)
File "/anaconda/envs/vision_finetune/lib/python3.10/site-packages/transformers/pipelines/base.py", line 921, in predict
return self(X)
File "/anaconda/envs/vision_finetune/lib/python3.10/site-packages/transformers/pipelines/image_classification.py", line 158, in __call__
return super().__call__(images, **kwargs)
File "/anaconda/envs/vision_finetune/lib/python3.10/site-packages/transformers/pipelines/base.py", line 1162, in __call__
return self.run_single(inputs, preprocess_params, forward_params, postprocess_params)
File "/anaconda/envs/vision_finetune/lib/python3.10/site-packages/transformers/pipelines/base.py", line 1168, in run_single
model_inputs = self.preprocess(inputs, **preprocess_params)
File "/anaconda/envs/vision_finetune/lib/python3.10/site-packages/transformers/pipelines/image_classification.py", line 161, in preprocess
image = load_image(image, timeout=timeout)
File "/anaconda/envs/vision_finetune/lib/python3.10/site-packages/transformers/image_utils.py", line 326, in load_image
raise ValueError(
ValueError: Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got /9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0a
HBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIy
MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAEAAQADASIA
AhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQA
AAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3
ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWm
p6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEA
AwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSEx
BhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElK
U1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3
uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD5/ooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiig
AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAC
iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKK
KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiig
AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAC
iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKK
KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiig
AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAC
iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKK
KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiig
AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAC
iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKK
KKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooo
oAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiig
AooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA//2Q==
. Failed with Non-base64 digit found
Image encoded with b64encode is successful, but encodebytes results in the above error.
### Expected behavior
The image classification pipeline works well when the input is produced with `base64.b64encode()` but fails when it is produced with `base64.encodebytes()`. Shouldn't the pipeline work with both input formats?
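The difference between the two encoders explains the failure: `base64.encodebytes()` wraps its output in newlines (MIME style), which a strict base64 validator rejects, while `base64.b64encode()` emits a single line. A minimal sketch, with made-up payload bytes standing in for the JPEG data:

```python
import base64

payload = b"\xff\xd8\xff" * 100  # stand-in bytes, not the reporter's actual JPEG

one_line = base64.b64encode(payload)   # single line, no newlines
wrapped = base64.encodebytes(payload)  # a newline every 76 characters plus a trailing one

print(b"\n" in one_line)  # False
print(b"\n" in wrapped)   # True

base64.b64decode(one_line, validate=True)  # decodes fine
base64.b64decode(wrapped, validate=True)   # raises binascii.Error ("Non-base64 digit found")
```

Stripping the embedded whitespace before decoding is enough to make the wrapped form valid base64 again.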
|
0.0
|
aafa7ce72b65c730788c122a72a974e464409e9a
|
[
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_base64_encoded_bytes"
] |
[
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_center_crop_array",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_center_crop_image",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_center_crop_tensor",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_conversion_array_to_array",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_conversion_array_to_image",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_conversion_image_to_array",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_conversion_image_to_image",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_conversion_tensor_to_image",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_conversion_torch_to_array",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_make_list_of_images_numpy",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_make_list_of_images_torch",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_normalize_array",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_normalize_image",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_normalize_tensor",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_resize_image_and_array",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_resize_image_and_array_non_default_to_square",
"tests/utils/test_image_utils.py::ImageFeatureExtractionTester::test_resize_tensor",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_base64",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_base64_prefix",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_exif_transpose",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_l",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_la",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_local",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_rgba",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_url",
"tests/utils/test_image_utils.py::LoadImageTester::test_load_img_url_timeout",
"tests/utils/test_image_utils.py::UtilFunctionTester::test_get_channel_dimension_axis",
"tests/utils/test_image_utils.py::UtilFunctionTester::test_get_image_size",
"tests/utils/test_image_utils.py::UtilFunctionTester::test_infer_channel_dimension"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-04-11 16:30:34+00:00
|
apache-2.0
| 2,754
|
|
zooniverse__panoptes-python-client-207
|
diff --git a/panoptes_client/panoptes.py b/panoptes_client/panoptes.py
index 3f59a9b..463751c 100644
--- a/panoptes_client/panoptes.py
+++ b/panoptes_client/panoptes.py
@@ -935,7 +935,7 @@ class LinkResolver(object):
def __setattr__(self, name, value):
reserved_names = ('raw', 'parent')
- if name not in reserved_names and name in self.parent.raw['links']:
+ if name not in reserved_names and name not in dir(self):
if not self.parent._loaded:
self.parent.reload()
if isinstance(value, PanoptesObject):
|
zooniverse/panoptes-python-client
|
a6f91519326e3be7e974b88ce4f8805c5db9a0e4
|
diff --git a/panoptes_client/tests/test_linkresolver.py b/panoptes_client/tests/test_linkresolver.py
new file mode 100644
index 0000000..72555c6
--- /dev/null
+++ b/panoptes_client/tests/test_linkresolver.py
@@ -0,0 +1,23 @@
+from __future__ import absolute_import, division, print_function
+
+import unittest
+import sys
+
+if sys.version_info <= (3, 0):
+ from mock import Mock
+else:
+ from unittest.mock import Mock
+
+from panoptes_client.panoptes import LinkResolver
+
+
+class TestLinkResolver(unittest.TestCase):
+ def test_set_new_link(self):
+ parent = Mock()
+ parent.raw = {'links': {}}
+
+ target = Mock()
+
+ resolver = LinkResolver(parent)
+ resolver.newlink = target
+ self.assertEqual(parent.raw['links'].get('newlink', None), target)
diff --git a/panoptes_client/tests/test_subject_set.py b/panoptes_client/tests/test_subject_set.py
new file mode 100644
index 0000000..97d33cd
--- /dev/null
+++ b/panoptes_client/tests/test_subject_set.py
@@ -0,0 +1,42 @@
+from __future__ import absolute_import, division, print_function
+
+import unittest
+import sys
+
+if sys.version_info <= (3, 0):
+ from mock import patch, Mock
+else:
+ from unittest.mock import patch, Mock
+
+from panoptes_client.subject_set import SubjectSet
+
+
+class TestSubjectSet(unittest.TestCase):
+ def test_create(self):
+ with patch('panoptes_client.panoptes.Panoptes') as pc:
+ pc.client().post = Mock(return_value=(
+ {
+ 'subject_sets': [{
+ 'id': 0,
+ 'display_name': '',
+ }],
+ },
+ '',
+ ))
+ subject_set = SubjectSet()
+ subject_set.links.project = 1234
+ subject_set.display_name = 'Name'
+ subject_set.save()
+
+ pc.client().post.assert_called_with(
+ '/subject_sets',
+ json={
+ 'subject_sets': {
+ 'display_name': 'Name',
+ 'links': {
+ 'project': 1234,
+ }
+ }
+ },
+ etag=None,
+ )
|
Cannot create and save new subject set since 1.1
This [script](https://github.com/miclaraia/muon_analysis/blob/master/muon/scripts/test_panoptes_connection.py) illustrates the problem; it fails on `subject_set.save()`. It mimics the instructions in the tutorial detailed in the [docs](https://panoptes-python-client.readthedocs.io/en/v1.1/user_guide.html#tutorial-creating-a-new-project).
The script tries to get a project, create a subject set, link the subject set to the project, and then save the subject set. The following trace is shown on `subject_set.save()`:
```
File "test_panoptes_connection.py", line 23, in main
subject_set.save()
File ".../venv/lib/python3.6/site-packages/panoptes_client/panoptes.py", line 815, in save
etag=self.etag
File ".../venv/lib/python3.6/site-packages/panoptes_client/panoptes.py", line 404, in post
retry=retry,
File ".../venv/lib/python3.6/site-packages/panoptes_client/panoptes.py", line 281, in json_request
json_response['errors']
panoptes_client.panoptes.PanoptesAPIException: {"schema"=>"did not contain a required property of 'links'"}
```
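For reference, a minimal sketch of the tutorial flow the linked script mirrors (credentials and the project id are placeholders); the failing step is the final `save()`:

```python
from panoptes_client import Panoptes, Project, SubjectSet

Panoptes.connect(username="user", password="pass")  # placeholder credentials

project = Project.find(1234)             # placeholder project id
subject_set = SubjectSet()
subject_set.links.project = project      # the link that ends up missing from the payload
subject_set.display_name = "My subject set"
subject_set.save()                       # raises the PanoptesAPIException above
```

The one-line patch above changes `LinkResolver.__setattr__` so that assigning a link that is not yet present in `raw['links']` (such as `project` on a brand-new subject set) is stored instead of being silently dropped, which appears to be why the `links` property was missing from the POST payload.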
|
0.0
|
a6f91519326e3be7e974b88ce4f8805c5db9a0e4
|
[
"panoptes_client/tests/test_linkresolver.py::TestLinkResolver::test_set_new_link",
"panoptes_client/tests/test_subject_set.py::TestSubjectSet::test_create"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-22 16:58:26+00:00
|
apache-2.0
| 6,355
|
|
ethereum__py_ecc-6
|
diff --git a/py_ecc/secp256k1/secp256k1.py b/py_ecc/secp256k1/secp256k1.py
index 68f1ca1..db93ee5 100644
--- a/py_ecc/secp256k1/secp256k1.py
+++ b/py_ecc/secp256k1/secp256k1.py
@@ -6,8 +6,11 @@ import sys
if sys.version_info.major == 2:
safe_ord = ord
else:
- def safe_ord(x):
- return x
+ def safe_ord(value):
+ if isinstance(value, int):
+ return value
+ else:
+ return ord(value)
# Elliptic curve parameters (secp256k1)
|
ethereum/py_ecc
|
812722660d87c5c7366b1b4927dbf49a8ecbf3f9
|
diff --git a/tests/test_secp256k1.py b/tests/test_secp256k1.py
index c7fbe23..a56bba1 100644
--- a/tests/test_secp256k1.py
+++ b/tests/test_secp256k1.py
@@ -12,3 +12,11 @@ def test_privtopub():
def test_ecdsa_raw_sign():
v, r, s = ecdsa_raw_sign(b'\x35' * 32, priv)
assert ecdsa_raw_recover(b'\x35' * 32, (v, r, s)) == pub
+
+
+def test_issue_4_bug():
+ unsigned_message = '6a74f15f29c3227c5d1d2e27894da58d417a484ef53bc7aa57ee323b42ded656'
+ v = 28
+ r = int("5897c2c7c7412b0a555fb6f053ddb6047c59666bbebc6f5573134e074992d841", 16)
+ s = int("1c71d1c62b74caff8695a186e2a24dd701070ba9946748318135e3ac0950b1d4", 16)
+ ecdsa_raw_recover(unsigned_message, (v, r, s))
|
secp256k1.py fails on Python 3.4
Error test:
```python
import binascii
import rlp
# sha3 from module `pysha3`, not `sha3`
import sha3
from py_ecc.secp256k1 import ecdsa_raw_recover
n = 0
p = 20000000000
g = 100000
v = 1000
Tn = ''
Tp = p.to_bytes((p.bit_length()//8) + 1,byteorder='big')
Tg = g.to_bytes((g.bit_length()//8) + 1,byteorder='big')
Tt = binascii.unhexlify("687422eEA2cB73B5d3e242bA5456b782919AFc85")
Tv = v.to_bytes((v.bit_length()//8) + 1,byteorder='big')
Td = binascii.unhexlify("c0de")
transaction = [Tn, Tp, Tg, Tt, Tv, Td]
rlp_data=rlp.encode(transaction)
unsigned_message=sha3.keccak_256(rlp_data).hexdigest()
v = 28
r = int("5897c2c7c7412b0a555fb6f053ddb6047c59666bbebc6f5573134e074992d841",16)
s = int("1c71d1c62b74caff8695a186e2a24dd701070ba9946748318135e3ac0950b1d4",16)
ecdsa_raw_recover(unsigned_message, (v, r, s))
```
Error message:
> Traceback (most recent call last):
> File "<stdin>", line 1, in <module>
> File "/home/apalau/python3.4/lib64/python3.4/site-packages/py_ecc/secp256k1/secp256k1.py", line 132, in ecdsa_raw_recover
> z = bytes_to_int(msghash)
> File "/home/apalau/python3.4/lib64/python3.4/site-packages/py_ecc/secp256k1/secp256k1.py", line 21, in bytes_to_int
> o = (o << 8) + safe_ord(b)
> TypeError: unsupported operand type(s) for +: 'int' and 'str'
On Python 2.7 the same `ecdsa_raw_recover(unsigned_message, (v, r, s))` works well.
Python version:
> python --version
> Python 3.4.5
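The root cause is easy to see in isolation: on Python 3, iterating over the hex string passed as `msghash` yields one-character `str` objects, while iterating over `bytes` yields `int`s, so an identity `safe_ord` only breaks for string input. A small self-contained illustration (the values are arbitrary):

```python
print([type(c).__name__ for c in "6a74"])       # ['str', 'str', 'str', 'str']
print([type(b).__name__ for b in b"\x6a\x74"])  # ['int', 'int']

def safe_ord(value):
    # the patched behaviour: pass ints through, apply ord() to 1-character strings
    return value if isinstance(value, int) else ord(value)

print(safe_ord(106), safe_ord("j"))  # 106 106
```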
|
0.0
|
812722660d87c5c7366b1b4927dbf49a8ecbf3f9
|
[
"tests/test_secp256k1.py::test_issue_4_bug"
] |
[
"tests/test_secp256k1.py::test_privtopub",
"tests/test_secp256k1.py::test_ecdsa_raw_sign"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2017-11-15 03:22:45+00:00
|
mit
| 2,198
|
|
googleapis__synthtool-1951
|
diff --git a/synthtool/gcp/templates/python_library/.github/blunderbuss.yml b/synthtool/gcp/templates/python_library/.github/blunderbuss.yml
index 18a3ed8..22ab79e 100644
--- a/synthtool/gcp/templates/python_library/.github/blunderbuss.yml
+++ b/synthtool/gcp/templates/python_library/.github/blunderbuss.yml
@@ -5,17 +5,23 @@
# team, please update `codeowner_team` in `.repo-metadata.json`.
{% if metadata['repo']['codeowner_team']|length -%}
assign_issues:
- - {{ metadata['repo']['codeowner_team'].replace("@","") }}
+ {%- for codeowner in metadata['repo']['codeowner_team'].replace("@","").split(" ") %}
+ - {{ codeowner }}
+ {%- endfor %}
assign_issues_by:
- labels:
- "samples"
to:
- googleapis/python-samples-reviewers
- - {{ metadata['repo']['codeowner_team'].replace("@","") }}
+ {%- for codeowner in metadata['repo']['codeowner_team'].replace("@","").split(" ") %}
+ - {{ codeowner }}
+ {%- endfor %}
assign_prs:
- - {{ metadata['repo']['codeowner_team'].replace("@","") }}
+ {%- for codeowner in metadata['repo']['codeowner_team'].replace("@","").split(" ") %}
+ - {{ codeowner }}
+ {%- endfor %}
{% else %}
assign_issues:
- googleapis/python-core-client-libraries
|
googleapis/synthtool
|
223f39e29577145d4238a522633c2f3e5e6dc8dc
|
diff --git a/tests/test_python_library.py b/tests/test_python_library.py
index 0019ba7..b4d5019 100644
--- a/tests/test_python_library.py
+++ b/tests/test_python_library.py
@@ -178,7 +178,7 @@ def assert_valid_yaml(file):
pytest.fail(f"unable to parse YAML: {file}")
-def test_library_blunderbuss():
+def test_library_blunderbuss_single_codeowner():
t = templates.Templates(PYTHON_LIBRARY / ".github")
result = t.render(
"blunderbuss.yml",
@@ -188,6 +188,7 @@ def test_library_blunderbuss():
config = yaml.safe_load(result)
assert "googleapis/python-core-client-libraries" not in config["assign_issues"]
assert "googleapis/foo" in config["assign_issues"]
+ assert "googleapis/foo" in config["assign_prs"]
assert (
"googleapis/python-samples-reviewers" in config["assign_issues_by"][0]["to"]
)
@@ -196,6 +197,28 @@ def test_library_blunderbuss():
pytest.fail(f"unable to parse YAML: {result}")
+def test_library_blunderbuss_multiple_codeowner():
+ t = templates.Templates(PYTHON_LIBRARY / ".github")
+ result = t.render(
+ "blunderbuss.yml",
+ metadata={"repo": {"codeowner_team": "googleapis/foo googleapis/bar"}},
+ ).read_text()
+ try:
+ config = yaml.safe_load(result)
+ assert "googleapis/python-core-client-libraries" not in config["assign_issues"]
+ assert "googleapis/foo" in config["assign_issues"]
+ assert "googleapis/bar" in config["assign_issues"]
+ assert "googleapis/foo" in config["assign_prs"]
+ assert "googleapis/bar" in config["assign_prs"]
+ assert (
+ "googleapis/python-samples-reviewers" in config["assign_issues_by"][0]["to"]
+ )
+ assert "googleapis/foo" in config["assign_issues_by"][0]["to"]
+ assert "googleapis/bar" in config["assign_issues_by"][0]["to"]
+ except yaml.YAMLError:
+ pytest.fail(f"unable to parse YAML: {result}")
+
+
def test_library_blunderbuss_no_codeowner():
t = templates.Templates(PYTHON_LIBRARY / ".github")
result = t.render(
|
Support multiple `codeowner_team`s in `.repo-metadata.json`
Today you can add a `codeowner_team` to `.repo-metadata.json`, and it will add that team as a CODEOWNER alongside the language-specific team. There are cases where we want multiple teams, often both a DPE team and a product engineering team that need CODEOWNERS access. Ideally, `codeowner_team` would support both a string and an array (a template-level sketch follows below).
Today, we would need to:
- hand-modify the `CODEOWNERS` file
- add `CODEOWNERS` to the ignore list in `synth.py` when copying templates
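One way to support several teams without changing the metadata schema is to keep `codeowner_team` as a space-separated string and expand it in the template, which is what the patch above does. A quick standalone check of that Jinja expression (the team names are illustrative, not real metadata):

```python
from jinja2 import Template

tpl = Template(
    "assign_issues:"
    "{%- for codeowner in team.replace('@', '').split(' ') %}\n"
    "  - {{ codeowner }}"
    "{%- endfor %}"
)
print(tpl.render(team="@googleapis/foo @googleapis/bar"))
# assign_issues:
#   - googleapis/foo
#   - googleapis/bar
```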
|
0.0
|
223f39e29577145d4238a522633c2f3e5e6dc8dc
|
[
"tests/test_python_library.py::test_library_blunderbuss_multiple_codeowner"
] |
[
"tests/test_python_library.py::test_library_noxfile[template_kwargs0-expected_text0]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs1-expected_text1]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs2-expected_text2]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs3-expected_text3]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs4-SYSTEM_TEST_EXTRAS:",
"tests/test_python_library.py::test_library_noxfile[template_kwargs5-expected_text5]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs6-expected_text6]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs7-expected_text7]",
"tests/test_python_library.py::test_library_noxfile[template_kwargs8-expected_text8]",
"tests/test_python_library.py::test_library_codeowners",
"tests/test_python_library.py::test_library_codeowners_without_metadata",
"tests/test_python_library.py::test_library_blunderbuss_single_codeowner",
"tests/test_python_library.py::test_library_blunderbuss_no_codeowner",
"tests/test_python_library.py::test_python_library",
"tests/test_python_library.py::test_split_system_tests",
"tests/test_python_library.py::test_configure_previous_major_version_branches[fixtures_dir0]",
"tests/test_python_library.py::test_configure_previous_major_version_branches[fixtures_dir1]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2024-04-10 11:39:56+00:00
|
apache-2.0
| 2,633
|
|
Stranger6667__pyanyapi-42
|
diff --git a/.travis.yml b/.travis.yml
index 1975b26..b7b5b14 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,17 +1,30 @@
language: python
python:
- 3.5
-env:
- - TOX_ENV=py26
- - TOX_ENV=py27
- - TOX_ENV=py32
- - TOX_ENV=py33
- - TOX_ENV=py34
- - TOX_ENV=py35
- - TOX_ENV=pypy
- - TOX_ENV=pypy3
- - JYTHON=true
+matrix:
+ fast_finish: true
+ include:
+ - python: 3.5
+ env: TOX_ENV=py35
+ - python: 3.4
+ env: TOX_ENV=py34
+ - python: 3.3
+ env: TOX_ENV=py33
+ - python: 3.2
+ env: TOX_ENV=py32
+ - python: 2.7
+ env: TOX_ENV=py27
+ - python: 2.6
+ env: TOX_ENV=py26
+ - python: pypy
+ env: TOX_ENV=pypy
+ - python: pypy3
+ env: TOX_ENV=pypy3
+ - python: 3.5
+ env: $JYTHON=true
install:
+ - if [ $TOX_ENV = "py32" ]; then travis_retry pip install "virtualenv<14.0.0" "tox<1.8.0"; fi
+ - if [ $TOX_ENV = "pypy3" ]; then travis_retry pip install "virtualenv<14.0.0" "tox<1.8.0"; fi
- if [ -z "$JYTHON" ]; then pip install codecov; fi
- if [ "$TOX_ENV" ]; then travis_retry pip install "virtualenv<14.0.0" tox; fi
before_install:
@@ -22,4 +35,4 @@ script:
- if [ "$JYTHON" ]; then travis_retry jython setup.py test; fi
- if [ "$TOX_ENV" ]; then tox -e $TOX_ENV; fi
after_success:
- - codecov
\ No newline at end of file
+ - codecov
diff --git a/pyanyapi/interfaces.py b/pyanyapi/interfaces.py
index 698c637..c0914b2 100644
--- a/pyanyapi/interfaces.py
+++ b/pyanyapi/interfaces.py
@@ -274,7 +274,7 @@ class YAMLInterface(DictInterface):
def perform_parsing(self):
try:
- return yaml.load(self.content)
+ return yaml.safe_load(self.content)
except yaml.error.YAMLError:
raise ResponseParseError(self._error_message, self.content)
|
Stranger6667/pyanyapi
|
aebee636ad26f387850a6c8ab820ce4aac3f9adb
|
diff --git a/tests/test_parsers.py b/tests/test_parsers.py
index 38223e2..4958b21 100644
--- a/tests/test_parsers.py
+++ b/tests/test_parsers.py
@@ -63,6 +63,15 @@ def test_yaml_parser_error():
parsed.test
+def test_yaml_parser_vulnerability():
+ """
+ In case of usage of yaml.load `test` value will be equal to 0.
+ """
+ parsed = YAMLParser({'test': 'container > test'}).parse('!!python/object/apply:os.system ["exit 0"]')
+ with pytest.raises(ResponseParseError):
+ parsed.test
+
+
@lxml_is_supported
@pytest.mark.parametrize(
'settings', (
|
YAMLParser method is vulnerable
from pyanyapi import YAMLParser
YAMLParser({'test': 'container > test'}).parse('!!python/object/apply:os.system ["calc.exe"]').test
Hi, there is a vulnerability in the `YAMLParser` method in `interfaces.py`; please see the PoC above. It can execute arbitrary Python code, resulting in command execution.
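For context, a minimal sketch of the difference between the two loaders, using a harmless command in place of the PoC payload:

```python
import yaml

payload = '!!python/object/apply:os.system ["exit 0"]'

# An unsafe loader constructs arbitrary Python objects, i.e. it would call os.system here.
# safe_load refuses to construct the tag instead:
try:
    yaml.safe_load(payload)
except yaml.YAMLError as exc:  # ConstructorError is a YAMLError subclass
    print(type(exc).__name__)  # ConstructorError
```

Because `ConstructorError` derives from `yaml.error.YAMLError`, the patched `perform_parsing` surfaces such payloads as a `ResponseParseError` instead of executing them.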
|
0.0
|
aebee636ad26f387850a6c8ab820ce4aac3f9adb
|
[
"tests/test_parsers.py::test_yaml_parser_error",
"tests/test_parsers.py::test_yaml_parser_vulnerability",
"tests/test_parsers.py::test_yaml_parse"
] |
[
"tests/test_parsers.py::test_xml_objectify_parser",
"tests/test_parsers.py::test_xml_objectify_parser_error",
"tests/test_parsers.py::test_xml_parser_error",
"tests/test_parsers.py::test_xml_parsed[settings0]",
"tests/test_parsers.py::test_xml_parsed[settings1]",
"tests/test_parsers.py::test_xml_simple_settings",
"tests/test_parsers.py::test_json_parsed",
"tests/test_parsers.py::test_multiple_parser_join",
"tests/test_parsers.py::test_multiply_parsers_declaration",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"test\":\"value\"}}-test-value]",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"test\":\"value\"}}-second-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":{\"fail\":[1]}}-second-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":[[1],[],[3]]}-third-expected3]",
"tests/test_parsers.py::test_empty_values[{\"container\":null}-null-None]",
"tests/test_parsers.py::test_empty_values[{\"container\":[1,2]}-test-1,2]",
"tests/test_parsers.py::test_attributes",
"tests/test_parsers.py::test_efficient_parsing",
"tests/test_parsers.py::test_simple_config_xml_parser",
"tests/test_parsers.py::test_simple_config_json_parser",
"tests/test_parsers.py::test_settings_inheritance",
"tests/test_parsers.py::test_complex_config",
"tests/test_parsers.py::test_json_parse",
"tests/test_parsers.py::test_json_value_error_parse",
"tests/test_parsers.py::test_regexp_parse",
"tests/test_parsers.py::test_ajax_parser",
"tests/test_parsers.py::test_ajax_parser_cache",
"tests/test_parsers.py::test_ajax_parser_invalid_settings",
"tests/test_parsers.py::test_parse_memoization",
"tests/test_parsers.py::test_regexp_settings",
"tests/test_parsers.py::test_parse_all",
"tests/test_parsers.py::test_parse_all_combined_parser",
"tests/test_parsers.py::test_parse_csv",
"tests/test_parsers.py::test_parse_csv_custom_delimiter",
"tests/test_parsers.py::test_csv_parser_error",
"tests/test_parsers.py::test_children[SubParser]",
"tests/test_parsers.py::test_children[sub_parser1]",
"tests/test_parsers.py::TestIndexOfParser::test_default[foo-b\\xe1r]",
"tests/test_parsers.py::TestIndexOfParser::test_default[foo-b\\xc3\\xa1r]",
"tests/test_parsers.py::TestIndexOfParser::test_parsing_error[has_bar]",
"tests/test_parsers.py::TestIndexOfParser::test_parsing_error[has_baz]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-11-07 09:56:44+00:00
|
mit
| 724
|
|
SathyaBhat__spotify-dl-368
|
diff --git a/spotify_dl/spotify.py b/spotify_dl/spotify.py
index f777a59..448d5e4 100644
--- a/spotify_dl/spotify.py
+++ b/spotify_dl/spotify.py
@@ -42,8 +42,14 @@ def fetch_tracks(sp, item_type, item_id):
continue
track_album_info = track_info.get("album")
track_num = track_info.get("track_number")
- spotify_id = track_info.get("id")
track_name = track_info.get("name")
+ spotify_id = track_info.get("id")
+ try:
+ track_audio_data = sp.audio_analysis(spotify_id)
+ tempo = track_audio_data.get("track").get("tempo")
+ except:
+ log.error("Couldn't fetch audio analysis for %s", track_name)
+ tempo = None
track_artist = ", ".join(
[artist["name"] for artist in track_info.get("artists")]
)
@@ -86,6 +92,7 @@ def fetch_tracks(sp, item_type, item_id):
"genre": genre,
"spotify_id": spotify_id,
"track_url": None,
+ "tempo": tempo,
}
)
offset += 1
@@ -141,6 +148,12 @@ def fetch_tracks(sp, item_type, item_id):
)
track_num = item["track_number"]
spotify_id = item.get("id")
+ try:
+ track_audio_data = sp.audio_analysis(spotify_id)
+ tempo = track_audio_data.get("track").get("tempo")
+ except:
+ log.error("Couldn't fetch audio analysis for %s", track_name)
+ tempo = None
songs_list.append(
{
"name": track_name,
@@ -154,6 +167,7 @@ def fetch_tracks(sp, item_type, item_id):
"cover": cover,
"genre": genre,
"spotify_id": spotify_id,
+ "tempo": tempo,
}
)
offset += 1
@@ -182,6 +196,12 @@ def fetch_tracks(sp, item_type, item_id):
album_total = album_info.get("total_tracks")
track_num = items["track_number"]
spotify_id = items["id"]
+ try:
+ track_audio_data = sp.audio_analysis(spotify_id)
+ tempo = track_audio_data.get("track").get("tempo")
+ except:
+ log.error("Couldn't fetch audio analysis for %s", track_name)
+ tempo = None
if len(items["album"]["images"]) > 0:
cover = items["album"]["images"][0]["url"]
else:
@@ -203,6 +223,7 @@ def fetch_tracks(sp, item_type, item_id):
"genre": genre,
"track_url": None,
"spotify_id": spotify_id,
+ "tempo": tempo,
}
)
diff --git a/spotify_dl/youtube.py b/spotify_dl/youtube.py
index e0f47f5..f3b6846 100644
--- a/spotify_dl/youtube.py
+++ b/spotify_dl/youtube.py
@@ -64,13 +64,13 @@ def write_tracks(tracks_file, song_dict):
i = 0
writer = csv.writer(file_out, delimiter=";")
for url_dict in song_dict["urls"]:
- # for track in url_dict['songs']:
for track in url_dict["songs"]:
track_url = track["track_url"] # here
track_name = track["name"]
track_artist = track["artist"]
track_num = track["num"]
track_album = track["album"]
+ track_tempo = track["tempo"]
track["save_path"] = url_dict["save_path"]
track_db.append(track)
track_index = i
@@ -81,6 +81,7 @@ def write_tracks(tracks_file, song_dict):
track_url,
str(track_num),
track_album,
+ str(track_tempo),
str(track_index),
]
try:
@@ -119,6 +120,8 @@ def set_tags(temp, filename, kwargs):
)
song_file["genre"] = song.get("genre")
+ if song.get("tempo") is not None:
+ song_file["bpm"] = str(song.get("tempo"))
song_file.save()
song_file = MP3(filename, ID3=ID3)
cover = song.get("cover")
|
SathyaBhat/spotify-dl
|
b56afb2b459b93245dede75b7c27fd46753707ff
|
diff --git a/tests/test_spotify_fetch_tracks.py b/tests/test_spotify_fetch_tracks.py
index 89b2ed6..7addca4 100644
--- a/tests/test_spotify_fetch_tracks.py
+++ b/tests/test_spotify_fetch_tracks.py
@@ -32,6 +32,7 @@ def test_spotify_playlist_fetch_one():
"track_url": None,
"playlist_num": 1,
"spotify_id": "2GpBrAoCwt48fxjgjlzMd4",
+ 'tempo': 74.656,
} == songs[0]
@@ -53,6 +54,7 @@ def test_spotify_playlist_fetch_more():
"year": "2012",
"playlist_num": 1,
"spotify_id": "4rzfv0JLZfVhOhbSQ8o5jZ",
+ 'tempo': 135.016,
},
{
"album": "Wellness & Dreaming Source",
@@ -66,6 +68,7 @@ def test_spotify_playlist_fetch_more():
"playlist_num": 2,
"track_url": None,
"spotify_id": "5o3jMYOSbaVz3tkgwhELSV",
+ 'tempo': 137.805,
},
{
"album": "This Is Happening",
@@ -79,6 +82,7 @@ def test_spotify_playlist_fetch_more():
"year": "2010",
"playlist_num": 3,
"spotify_id": "4Cy0NHJ8Gh0xMdwyM9RkQm",
+ 'tempo': 134.99,
},
{
"album": "Glenn Horiuchi Trio / Gelenn Horiuchi Quartet: Mercy / Jump Start "
@@ -94,6 +98,7 @@ def test_spotify_playlist_fetch_more():
"track_url": None,
"playlist_num": 4,
"spotify_id": "6hvFrZNocdt2FcKGCSY5NI",
+ 'tempo': 114.767,
},
{
"album": "All The Best (Spanish Version)",
@@ -107,6 +112,7 @@ def test_spotify_playlist_fetch_more():
"year": "2007",
"playlist_num": 5,
"spotify_id": "2E2znCPaS8anQe21GLxcvJ",
+ 'tempo': 122.318,
},
] == songs
@@ -128,6 +134,7 @@ def test_spotify_track_fetch_one():
"track_url": None,
"playlist_num": 1,
"spotify_id": "2GpBrAoCwt48fxjgjlzMd4",
+ 'tempo': 74.656,
} == songs[0]
@@ -148,6 +155,7 @@ def test_spotify_album_fetch_one():
"year": "2012",
"playlist_num": 1,
"spotify_id": "5EoKQDGE2zxrTfRFZF52u5",
+ 'tempo': 120.009,
} == songs[0]
@@ -169,6 +177,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 1,
"spotify_id": "69Yw7H4bRIwfIxL0ZCZy8y",
+ 'tempo': 120.955,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -182,6 +191,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 2,
"spotify_id": "5GGSjXZeTgX9sKYBtl8K6U",
+ 'tempo': 147.384,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -195,6 +205,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 3,
"spotify_id": "0Ssh20fuVhmasLRJ97MLnp",
+ 'tempo': 152.769,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -208,6 +219,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 4,
"spotify_id": "2LasW39KJDE4VH9hTVNpE2",
+ 'tempo': 115.471,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -221,6 +233,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 5,
"spotify_id": "6jXrIu3hWbmJziw34IHIwM",
+ 'tempo': 145.124,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -234,6 +247,7 @@ def test_spotify_album_fetch_more():
"track_url": None,
"playlist_num": 6,
"spotify_id": "5dHmGuUeRgp5f93G69tox5",
+ 'tempo': 108.544,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -247,6 +261,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 7,
"spotify_id": "2KPj0oB7cUuHQ3FuardOII",
+ 'tempo': 159.156,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -260,6 +275,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 8,
"spotify_id": "34CcBjL9WqEAtnl2i6Hbxa",
+ 'tempo': 118.48,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -273,6 +289,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 9,
"spotify_id": "1x9ak6LGIazLhfuaSIEkhG",
+ 'tempo': 112.623,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -286,6 +303,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 10,
"spotify_id": "4CITL18Tos0PscW1amCK4j",
+ 'tempo': 145.497,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -299,6 +317,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 11,
"spotify_id": "1e9Tt3nKBwRbuaU79kN3dn",
+ 'tempo': 126.343,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -312,6 +331,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 12,
"spotify_id": "0uHqoDT7J2TYBsJx6m4Tvi",
+ 'tempo': 172.274,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -325,6 +345,8 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 13,
"spotify_id": "3MIueGYoNiyBNfi5ukDgAK",
+ 'tempo': 146.712,
+
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -338,6 +360,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 14,
"spotify_id": "34WAOFWdJ83a3YYrDAZTjm",
+ 'tempo': 128.873,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -351,6 +374,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 15,
"spotify_id": "2AFIPUlApcUwGEgOSDwoBz",
+ 'tempo': 122.986,
},
{
"album": "Queen II (Deluxe Remastered Version)",
@@ -364,6 +388,7 @@ def test_spotify_album_fetch_more():
"year": "1974",
"playlist_num": 16,
"spotify_id": "4G4Sf18XkFvNTV5vAxiQyd",
+ 'tempo': 169.166,
},
] == songs
assert (len(songs)) == 16
@@ -387,5 +412,6 @@ def test_spotify_playlist_fetch_local_file():
"year": "",
"playlist_num": 1,
"spotify_id": None,
+ "tempo": None,
}
- ] == songs
+ ] == songs
\ No newline at end of file
diff --git a/tests/test_youtube.py b/tests/test_youtube.py
index d3c29d6..38f0f38 100644
--- a/tests/test_youtube.py
+++ b/tests/test_youtube.py
@@ -28,6 +28,7 @@ def test_download_one_false_skip():
"cover": "https://i.scdn.co/image/ab67616d0000b27396d28597a5ae44ab66552183",
"genre": "album rock",
"spotify_id": "2GpBrAoCwt48fxjgjlzMd4",
+ 'tempo': 74.656,
}
],
}
@@ -83,6 +84,7 @@ def test_download_one_true_skip():
"cover": "https://i.scdn.co/image/ab67616d0000b27396d28597a5ae44ab66552183",
"genre": "album rock",
"spotify_id": "2GpBrAoCwt48fxjgjlzMd4",
+ 'tempo': 74.656,
}
],
}
@@ -120,6 +122,7 @@ def test_download_cover_none():
"cover": None,
"genre": "classic rock",
"spotify_id": "12LhScrlYazmU4vsqpRQNI",
+ 'tempo': 159.15,
}
],
}
|
Add BPM to metadata of MP3 songs.
The Spotify API exposes a track's BPM and mutagen supports writing it to MP3 metadata, so adding BPM to the tags of downloaded songs would be a natural option. This feature could help many who work in DJ mixing.
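Both halves already exist as public APIs: spotipy's audio analysis exposes a track's tempo, and mutagen's `EasyID3` maps a `bpm` key onto the TBPM frame. A rough sketch with placeholder values (not the project's actual wiring through `fetch_tracks` and `set_tags`):

```python
from mutagen.easyid3 import EasyID3

# In the real flow the tempo would come from spotipy:
#   sp.audio_analysis(spotify_id)["track"]["tempo"]
tempo = 120.0  # placeholder value

song_file = EasyID3("track.mp3")  # assumes an MP3 that already carries an ID3 tag
song_file["bpm"] = str(tempo)     # written out as a TBPM frame
song_file.save()
```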
|
0.0
|
b56afb2b459b93245dede75b7c27fd46753707ff
|
[
"tests/test_spotify_fetch_tracks.py::test_spotify_playlist_fetch_one",
"tests/test_spotify_fetch_tracks.py::test_spotify_playlist_fetch_more",
"tests/test_spotify_fetch_tracks.py::test_spotify_track_fetch_one",
"tests/test_spotify_fetch_tracks.py::test_spotify_album_fetch_one",
"tests/test_spotify_fetch_tracks.py::test_spotify_album_fetch_more",
"tests/test_spotify_fetch_tracks.py::test_spotify_playlist_fetch_local_file"
] |
[
"tests/test_youtube.py::test_download_one_true_skip"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-12-30 12:03:54+00:00
|
mit
| 674
|
|
BelgianBiodiversityPlatform__python-dwca-reader-101
|
diff --git a/dwca/files.py b/dwca/files.py
index a1bcd9a..bada8fa 100644
--- a/dwca/files.py
+++ b/dwca/files.py
@@ -6,7 +6,7 @@ from array import array
from typing import List, Union, IO, Dict, Optional
from dwca.descriptors import DataFileDescriptor
-from dwca.rows import CoreRow, ExtensionRow
+from dwca.rows import CoreRow, ExtensionRow, Row
class CSVDataFile(object):
@@ -105,11 +105,6 @@ class CSVDataFile(object):
Creating this index can be time and memory consuming for large archives, so it's created on the fly
at first access.
"""
- if self.file_descriptor.represents_corefile:
- raise AttributeError(
- "coreid_index is only available for extension data files"
- )
-
if self._coreid_index is None:
self._coreid_index = self._build_coreid_index()
@@ -120,14 +115,18 @@ class CSVDataFile(object):
index = {} # type: Dict[str, array[int]]
for position, row in enumerate(self):
- tmp = ExtensionRow(row, position, self.file_descriptor)
- index.setdefault(tmp.core_id, array('L')).append(position)
+ if self.file_descriptor.represents_corefile:
+ tmp = CoreRow(row, position, self.file_descriptor)
+ index.setdefault(tmp.id, array('L')).append(position)
+ else:
+ tmp = ExtensionRow(row, position, self.file_descriptor)
+ index.setdefault(tmp.core_id, array('L')).append(position)
return index
# TODO: For ExtensionRow and a specific field only, generalize ?
# TODO: What happens if called on a Core Row?
- def get_all_rows_by_coreid(self, core_id: int) -> List[ExtensionRow]:
+ def get_all_rows_by_coreid(self, core_id: int) -> List[Row]:
"""Return a list of :class:`dwca.rows.ExtensionRow` whose Core Id field match `core_id`."""
if core_id not in self.coreid_index:
return []
diff --git a/dwca/star_record.py b/dwca/star_record.py
new file mode 100644
index 0000000..6be17ff
--- /dev/null
+++ b/dwca/star_record.py
@@ -0,0 +1,58 @@
+from dwca.files import CSVDataFile
+from typing import List, Literal
+import itertools
+
+
+class StarRecordIterator(object):
+ """ Object used to iterate over multiple DWCA-files joined on the coreid
+
+ :param files_to_join: a list of the `dwca.files.CSVDataFile`s we'd like to join.
+ May or may not include the core file (the core is not treated in a special way)
+ :param how: indicates the type of join. "inner" and "outer" correspond vaguely to
+ inner and full joins. The outer join includes rows that don't match on all files,
+ however, it doesn't create null fields to fill in when rows are missing in files.
+ Attempts to conform to pandas.DataFrame.merge API.
+ """
+ def __init__(self, files_to_join: List[CSVDataFile], how: Literal["inner", "outer"] = "inner"):
+ self.files_to_join = files_to_join
+
+ # gather the coreids we want to join over.
+ self.common_core = set(self.files_to_join[0].coreid_index)
+ for data_file in self.files_to_join[1:]:
+ # inner join: coreid must be in all files
+ if how == "inner":
+ self.common_core &= set(data_file.coreid_index)
+ # outer join: coreid may be in any files
+ elif how == "outer":
+ self.common_core |= set(data_file.coreid_index)
+
+ # initialize iterator variables
+ self._common_core_iterator = iter(self.common_core)
+ self._cross_product_iterator = iter([])
+
+
+ def __next__(self):
+ # the next combination of rows matching this coreid
+ next_positions = next(self._cross_product_iterator, None)
+ # we finished all the combinations for this coreid
+ if not next_positions:
+ # get the next coreid
+ self._current_coreid = next(self._common_core_iterator)
+ self._files_with_current_coreid = [
+ csv_file for csv_file in self.files_to_join
+ if self._current_coreid in csv_file.coreid_index]
+ # this iterates over all combinations of rows matching a coreid from all files
+ self._cross_product_iterator = itertools.product(
+ *(
+ csv_file.coreid_index[self._current_coreid]
+ for csv_file in self._files_with_current_coreid
+ ))
+ # go back and try to iterate over the rows for the new coreid
+ return next(self)
+ # zip up this combination of rows from all of the files.
+ return (
+ csv_file.get_row_by_position(position) for position, csv_file in zip(next_positions, self._files_with_current_coreid)
+ )
+
+
+ def __iter__(self): return self
|
BelgianBiodiversityPlatform/python-dwca-reader
|
b5b2bea333527353c5fe05b22e01c347a890ac61
|
diff --git a/dwca/test/test_datafile.py b/dwca/test/test_datafile.py
index d5d7841..3fc461b 100644
--- a/dwca/test/test_datafile.py
+++ b/dwca/test/test_datafile.py
@@ -31,9 +31,13 @@ class TestCSVDataFile(unittest.TestCase):
with DwCAReader(sample_data_path("dwca-2extensions.zip")) as dwca:
extension_files = dwca.extension_files
+ core_txt = dwca.core_file
description_txt = extension_files[0]
vernacular_txt = extension_files[1]
+ expected_core = {'1': array('L', [0]), '2': array('L', [1]), '3': array('L', [2]), '4': array('L', [3])}
+ assert core_txt.coreid_index == expected_core
+
expected_vernacular = {"1": array('L', [0, 1, 2]), "2": array('L', [3])}
assert vernacular_txt.coreid_index == expected_vernacular
diff --git a/dwca/test/test_star_record.py b/dwca/test/test_star_record.py
new file mode 100644
index 0000000..c080595
--- /dev/null
+++ b/dwca/test/test_star_record.py
@@ -0,0 +1,55 @@
+from dwca.read import DwCAReader
+from dwca.rows import CoreRow
+from dwca.star_record import StarRecordIterator
+from .helpers import sample_data_path
+import pytest
+import unittest
+
+class TestStarRecordIterator(unittest.TestCase):
+
+ def test_inner_join(self):
+
+ expected_inner_join = frozenset({
+ frozenset({('1', 0, 'Description'), ('1', 0, 'Taxon'), ('1', 0, 'VernacularName')}),
+ frozenset({('1', 0, 'Description'), ('1', 0, 'Taxon'), ('1', 1, 'VernacularName')}),
+ frozenset({('1', 0, 'Description'), ('1', 0, 'Taxon'), ('1', 2, 'VernacularName')}),
+ frozenset({('1', 1, 'Description'), ('1', 0, 'Taxon'), ('1', 0, 'VernacularName')}),
+ frozenset({('1', 1, 'Description'), ('1', 0, 'Taxon'), ('1', 1, 'VernacularName')}),
+ frozenset({('1', 1, 'Description'), ('1', 0, 'Taxon'), ('1', 2, 'VernacularName')})
+ })
+
+ with DwCAReader(sample_data_path("dwca-2extensions.zip")) as dwca:
+ star_records = StarRecordIterator(dwca.extension_files + [dwca.core_file], how="inner")
+ stars = []
+ for star_record in star_records:
+ rows = []
+ for row in star_record:
+ rows.append((row.id if isinstance(row, CoreRow) else row.core_id, row.position, row.rowtype.split('/')[-1]))
+ stars.append(frozenset(rows))
+
+ assert frozenset(stars) == expected_inner_join
+
+ def test_outer_join(self):
+
+ expected_outer_join = frozenset({
+ frozenset({('4', 2, 'Description'), ('4', 3, 'Taxon')}),
+ frozenset({('1', 0, 'Description'), ('1', 0, 'Taxon'), ('1', 0, 'VernacularName')}),
+ frozenset({('1', 0, 'Description'), ('1', 0, 'Taxon'), ('1', 1, 'VernacularName')}),
+ frozenset({('1', 0, 'Description'), ('1', 0, 'Taxon'), ('1', 2, 'VernacularName')}),
+ frozenset({('1', 1, 'Description'), ('1', 0, 'Taxon'), ('1', 0, 'VernacularName')}),
+ frozenset({('1', 1, 'Description'), ('1', 0, 'Taxon'), ('1', 1, 'VernacularName')}),
+ frozenset({('1', 1, 'Description'), ('1', 0, 'Taxon'), ('1', 2, 'VernacularName')}),
+ frozenset({('3', 2, 'Taxon')}),
+ frozenset({('2', 1, 'Taxon'), ('2', 3, 'VernacularName')})
+ })
+
+ with DwCAReader(sample_data_path("dwca-2extensions.zip")) as dwca:
+ star_records = StarRecordIterator(dwca.extension_files + [dwca.core_file], how="outer")
+ stars = []
+ for star_record in star_records:
+ rows = []
+ for row in star_record:
+ rows.append((row.id if isinstance(row, CoreRow) else row.core_id, row.position, row.rowtype.split('/')[-1]))
+ stars.append(frozenset(rows))
+
+ assert frozenset(stars) == expected_outer_join
|
Extend CSVDataFile to support hash index on Core file
### Description
It would be useful to be able to generalize iterators over the type of record. AFAIK, there's nothing particularly special about the Core Record data-format-wise, so it should be possible to create iterator methods that apply arbitrarily to Core or Extension files.
My particular use case is that I'm working on an iterator for JOINed files, and it's awkward to deal with the Core Record as a special entity, since nothing about the JOIN process requires knowledge of whether a file is Core or Extension.
### Deliverables:
1) Extend the CSVDataFile class to allow a hash index on the Core Record.
- This seems like roughly a three-line change in [`_build_coreid_index`](https://github.com/csbrown/python-dwca-reader/blob/master/dwca/files.py#L118): just an if/then over what kind of row it is inspecting (see the sketch after this list).
- Maybe also need to edit [some type hints](https://github.com/csbrown/python-dwca-reader/blob/master/dwca/files.py#L130)
2) Update the [`test_coreid_index`](https://github.com/csbrown/python-dwca-reader/blob/master/dwca/test/test_datafile.py#L30) test to also build an index on the Core Record.
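A sketch of deliverable 1, assuming the existing `CSVDataFile` internals: branch on the descriptor so core files are keyed by `id` and extension files by `core_id`.

```python
from array import array
from dwca.rows import CoreRow, ExtensionRow

def _build_coreid_index(self):
    index = {}
    for position, row in enumerate(self):
        if self.file_descriptor.represents_corefile:
            key = CoreRow(row, position, self.file_descriptor).id
        else:
            key = ExtensionRow(row, position, self.file_descriptor).core_id
        index.setdefault(key, array('L')).append(position)
    return index
```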
|
0.0
|
b5b2bea333527353c5fe05b22e01c347a890ac61
|
[
"dwca/test/test_datafile.py::TestCSVDataFile::test_close",
"dwca/test/test_datafile.py::TestCSVDataFile::test_coreid_index",
"dwca/test/test_datafile.py::TestCSVDataFile::test_file_descriptor_attribute",
"dwca/test/test_datafile.py::TestCSVDataFile::test_get_line_at_position_raises_indexerror",
"dwca/test/test_datafile.py::TestCSVDataFile::test_iterate",
"dwca/test/test_datafile.py::TestCSVDataFile::test_lines_to_ignore_attribute",
"dwca/test/test_datafile.py::TestCSVDataFile::test_string_representation",
"dwca/test/test_star_record.py::TestStarRecordIterator::test_inner_join",
"dwca/test/test_star_record.py::TestStarRecordIterator::test_outer_join"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-08 16:12:20+00:00
|
bsd-3-clause
| 105
|
|
minio__minio-py-1054
|
diff --git a/minio/commonconfig.py b/minio/commonconfig.py
index 735dd42..9d43851 100644
--- a/minio/commonconfig.py
+++ b/minio/commonconfig.py
@@ -179,8 +179,6 @@ class Filter:
)
if not valid:
raise ValueError("only one of and, prefix or tag must be provided")
- if prefix is not None and not prefix:
- raise ValueError("prefix must not be empty")
self._and_operator = and_operator
self._prefix = prefix
self._tag = tag
diff --git a/minio/tagging.py b/minio/tagging.py
index ac186ab..d666106 100644
--- a/minio/tagging.py
+++ b/minio/tagging.py
@@ -47,6 +47,5 @@ class Tagging:
"""Convert to XML."""
element = Element("Tagging")
if self._tags:
- element = SubElement(element, "TagSet")
- self._tags.toxml(element)
+ self._tags.toxml(SubElement(element, "TagSet"))
return element
diff --git a/minio/xml.py b/minio/xml.py
index 438dbb6..f995d2a 100644
--- a/minio/xml.py
+++ b/minio/xml.py
@@ -77,7 +77,7 @@ def findtext(element, name, strict=False):
if strict:
raise ValueError("XML element <{0}> not found".format(name))
return None
- return element.text
+ return element.text or ""
def unmarshal(cls, xmlstring):
|
minio/minio-py
|
bac587a450e0b1e63ea643d86f25006da2fcaba2
|
diff --git a/tests/unit/lifecycleconfig_test.py b/tests/unit/lifecycleconfig_test.py
index d9ecde8..577ad01 100644
--- a/tests/unit/lifecycleconfig_test.py
+++ b/tests/unit/lifecycleconfig_test.py
@@ -41,6 +41,18 @@ class LifecycleConfigTest(TestCase):
)
xml.marshal(config)
+ config = LifecycleConfig(
+ [
+ Rule(
+ ENABLED,
+ rule_filter=Filter(prefix=""),
+ rule_id="rule",
+ expiration=Expiration(days=365),
+ ),
+ ],
+ )
+ xml.marshal(config)
+
config = xml.unmarshal(
LifecycleConfig,
"""<LifeCycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -68,3 +80,20 @@ class LifecycleConfigTest(TestCase):
</LifeCycleConfiguration>""",
)
xml.marshal(config)
+
+ config = xml.unmarshal(
+ LifecycleConfig,
+ """<LifeCycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ <Rule>
+ <ID>DeleteAfterBecomingNonCurrent</ID>
+ <Filter>
+ <Prefix></Prefix>
+ </Filter>
+ <Status>Enabled</Status>
+ <NoncurrentVersionExpiration>
+ <NoncurrentDays>100</NoncurrentDays>
+ </NoncurrentVersionExpiration>
+ </Rule>
+</LifeCycleConfiguration>""",
+ )
+ xml.marshal(config)
|
Issue setting bucket (and potentially object) tags
I am using minio-py 7.0.0 and receive a MalformedXML S3Error any time I try to set the tags of my bucket. This issue probably occurs with object tags as well, but I have not tested that yet.
The simplified code below follows your API example code but seems to fail. `bucket_name` and `s3Client` are defined elsewhere in my code, and everything is properly imported.
```
tags = Tags.new_bucket_tags()
tags["Project"] = "Project One"
tags["User"] = "jsmith"
try:
s3Client.set_bucket_tags(bucket_name, tags)
except (ValueError, S3Error) as exc:
print(str(exc))
```
Here is the error I am receiving:
```S3 operation failed; code: MalformedXML, message: The XML you provided was not well-formed or did not validate against our published schema, resource: None, request_id: ****, host_id: ****```
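The MalformedXML points at the root of the serialized document: S3 expects a `<Tagging>` element wrapping `<TagSet>`, but the pre-patch `toxml()` rebinds `element` to the `TagSet` sub-element and returns that, so the wrapper is lost. A standalone illustration with the standard library (not minio-py's actual serialization path):

```python
from xml.etree.ElementTree import Element, SubElement, tostring

root = Element("Tagging")
tagset = SubElement(root, "TagSet")

print(tostring(tagset))  # b'<TagSet />', effectively what got sent
print(tostring(root))    # b'<Tagging><TagSet /></Tagging>', what S3 expects
```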
|
0.0
|
bac587a450e0b1e63ea643d86f25006da2fcaba2
|
[
"tests/unit/lifecycleconfig_test.py::LifecycleConfigTest::test_config"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-05 06:07:00+00:00
|
apache-2.0
| 3,942
|
|
PostHog__posthog-python-30
|
diff --git a/posthog/client.py b/posthog/client.py
index 0588082..22157cf 100644
--- a/posthog/client.py
+++ b/posthog/client.py
@@ -330,8 +330,8 @@ class Client(object):
except IndexError:
return default
- if feature_flag.get("is_simple_flag") and feature_flag.get("rollout_percentage"):
- response = _hash(key, distinct_id) <= (feature_flag["rollout_percentage"] / 100)
+ if feature_flag.get("is_simple_flag"):
+ response = _hash(key, distinct_id) <= ((feature_flag.get("rollout_percentage", 100) or 100) / 100)
else:
try:
request_data = {
|
PostHog/posthog-python
|
372fb74637bbcdc0c1de70f9f11b2b5d754553f0
|
diff --git a/posthog/test/test_client.py b/posthog/test/test_client.py
index e4971db..9c867a0 100644
--- a/posthog/test/test_client.py
+++ b/posthog/test/test_client.py
@@ -325,14 +325,28 @@ class TestClient(unittest.TestCase):
self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
@mock.patch("posthog.client.decide")
- def test_feature_enabled_request(self, patch_get):
- patch_get.return_value = {"featureFlags": ["beta-feature"]}
+ def test_feature_enabled_request(self, patch_decide):
+ patch_decide.return_value = {"featureFlags": ["beta-feature"]}
client = Client(TEST_API_KEY)
client.feature_flags = [
{"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": False, "rollout_percentage": 100}
]
self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+ @mock.patch("posthog.client.get")
+ def test_feature_enabled_simple_without_rollout_percentage(self, patch_get):
+ client = Client(TEST_API_KEY)
+ client.feature_flags = [{"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": True}]
+ self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+
+ @mock.patch("posthog.client.get")
+ def test_feature_enabled_simple_with_none_rollout_percentage(self, patch_get):
+ client = Client(TEST_API_KEY)
+ client.feature_flags = [
+ {"id": 1, "name": "Beta Feature", "key": "beta-feature", "is_simple_flag": True, "rollout_percantage": None}
+ ]
+ self.assertTrue(client.feature_enabled("beta-feature", "distinct_id"))
+
@mock.patch("posthog.client.Poller")
@mock.patch("posthog.client.get")
def test_feature_enabled_doesnt_exist(self, patch_get, patch_poll):
|
Library does not properly handle feature flags with no % rollout
See the Sentry issue https://sentry.io/organizations/posthog/issues/2220836956/?project=1899813. If a feature flag is enabled for everyone without a % rollout, the client crashes. Note that this is different from a 100% rollout, which works fine.
```
unsupported operand type(s) for /: 'NoneType' and 'int'
```
posthoganalytics/client.py#L276
```python
response = _hash(key, distinct_id) <= (feature_flag['rollout_percentage'] / 100)
```
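The crash is the `None / 100` division when the flag payload carries no rollout percentage. A minimal self-contained sketch of the guard the patch applies, treating both a missing key and an explicit `None` as a 100% rollout:

```python
def simple_flag_enabled(feature_flag, hashed_value):
    # hashed_value stands in for _hash(key, distinct_id) in the client
    rollout = feature_flag.get("rollout_percentage", 100) or 100
    return hashed_value <= rollout / 100

print(simple_flag_enabled({"is_simple_flag": True}, 0.42))                              # True
print(simple_flag_enabled({"is_simple_flag": True, "rollout_percentage": None}, 0.42))  # True
print(simple_flag_enabled({"is_simple_flag": True, "rollout_percentage": 30}, 0.42))    # False
```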
|
0.0
|
372fb74637bbcdc0c1de70f9f11b2b5d754553f0
|
[
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_with_none_rollout_percentage",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple_without_rollout_percentage"
] |
[
"posthog/test/test_client.py::TestClient::test_advanced_capture",
"posthog/test/test_client.py::TestClient::test_advanced_identify",
"posthog/test/test_client.py::TestClient::test_advanced_page",
"posthog/test/test_client.py::TestClient::test_advanced_set",
"posthog/test/test_client.py::TestClient::test_advanced_set_once",
"posthog/test/test_client.py::TestClient::test_basic_alias",
"posthog/test/test_client.py::TestClient::test_basic_capture",
"posthog/test/test_client.py::TestClient::test_basic_identify",
"posthog/test/test_client.py::TestClient::test_basic_page",
"posthog/test/test_client.py::TestClient::test_basic_page_distinct_uuid",
"posthog/test/test_client.py::TestClient::test_basic_set",
"posthog/test/test_client.py::TestClient::test_basic_set_once",
"posthog/test/test_client.py::TestClient::test_call_identify_fails",
"posthog/test/test_client.py::TestClient::test_debug",
"posthog/test/test_client.py::TestClient::test_default_timeout_15",
"posthog/test/test_client.py::TestClient::test_empty_flush",
"posthog/test/test_client.py::TestClient::test_feature_enabled_doesnt_exist",
"posthog/test/test_client.py::TestClient::test_feature_enabled_request",
"posthog/test/test_client.py::TestClient::test_feature_enabled_simple",
"posthog/test/test_client.py::TestClient::test_flush",
"posthog/test/test_client.py::TestClient::test_gzip",
"posthog/test/test_client.py::TestClient::test_load_feature_flags",
"posthog/test/test_client.py::TestClient::test_load_feature_flags_error",
"posthog/test/test_client.py::TestClient::test_load_feature_flags_wrong_key",
"posthog/test/test_client.py::TestClient::test_numeric_distinct_id",
"posthog/test/test_client.py::TestClient::test_overflow",
"posthog/test/test_client.py::TestClient::test_personal_api_key_doesnt_exist",
"posthog/test/test_client.py::TestClient::test_requires_api_key",
"posthog/test/test_client.py::TestClient::test_shutdown",
"posthog/test/test_client.py::TestClient::test_stringifies_distinct_id",
"posthog/test/test_client.py::TestClient::test_synchronous",
"posthog/test/test_client.py::TestClient::test_unicode",
"posthog/test/test_client.py::TestClient::test_user_defined_flush_at",
"posthog/test/test_client.py::TestClient::test_user_defined_timeout"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-05-14 11:50:01+00:00
|
mit
| 465
|
|
gtsystem__python-remotezip-14
|
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index cd0191b..c56f2c7 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -12,7 +12,7 @@ on:
jobs:
build:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
strategy:
matrix:
python-version: ["2.7", "3.6", "3.9", "3.10"]
diff --git a/remotezip.py b/remotezip.py
index cf82595..cfe6924 100644
--- a/remotezip.py
+++ b/remotezip.py
@@ -1,5 +1,6 @@
import io
import zipfile
+from itertools import tee
import requests
@@ -208,6 +209,13 @@ class RemoteFetcher:
raise RemoteIOError(str(e))
+def pairwise(iterable):
+ # pairwise('ABCDEFG') --> AB BC CD DE EF FG
+ a, b = tee(iterable)
+ next(b, None)
+ return zip(a, b)
+
+
class RemoteZip(zipfile.ZipFile):
def __init__(self, url, initial_buffer_size=64*1024, session=None, fetcher=RemoteFetcher, **kwargs):
fetcher = fetcher(url, session, **kwargs)
@@ -216,15 +224,12 @@ class RemoteZip(zipfile.ZipFile):
rio.set_position_to_size(self._get_position_to_size())
def _get_position_to_size(self):
- ilist = self.infolist()
+ ilist = [info.header_offset for info in self.infolist()]
if len(ilist) == 0:
return {}
+ ilist.sort()
+ ilist.append(self.start_dir)
+ return {a: b-a for a, b in pairwise(ilist)}
- position_to_size = {ilist[-1].header_offset: self.start_dir - ilist[-1].header_offset}
- for i in range(len(ilist) - 1):
- m1, m2 = ilist[i: i+2]
- position_to_size[m1.header_offset] = m2.header_offset - m1.header_offset
-
- return position_to_size
diff --git a/setup.py b/setup.py
index dea5194..1aea6e4 100644
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,7 @@ with open("README.md") as f:
setup(
name='remotezip',
- version='0.11.0',
+ version='0.11.1',
author='Giuseppe Tribulato',
author_email='[email protected]',
py_modules=['remotezip'],
|
gtsystem/python-remotezip
|
8d6634cc51e127afb7a9704c364f4fa136b5fd8d
|
diff --git a/test_remotezip.py b/test_remotezip.py
index 317a57c..c45f41c 100644
--- a/test_remotezip.py
+++ b/test_remotezip.py
@@ -238,6 +238,32 @@ class TestRemoteZip(unittest.TestCase):
self.assertIsNone(zfile.testzip())
+ @staticmethod
+ def make_unordered_zip_file(fname):
+ with zipfile.ZipFile(fname, 'w') as zip:
+ zip.writestr("fileA", "A" * 300000 + 'Z')
+ zip.writestr("fileB", "B" * 10000 + 'Z')
+ zip.writestr("fileC", "C" * 100000 + 'Z')
+ info_list = zip.infolist()
+ info_list[0], info_list[1] = info_list[1], info_list[0]
+
+ def test_unordered_fileinfo(self):
+ """Test that zip file with unordered fileinfo records works as well. Fix #13."""
+ with TmpDir() as dire:
+ fname = os.path.join(dire, 'test.zip')
+ self.make_unordered_zip_file(fname)
+
+ with rz.RemoteZip(fname, fetcher=LocalFetcher) as zfile:
+ names = zfile.namelist()
+ self.assertEqual(names, ['fileB', 'fileA', 'fileC'])
+ with zfile.open('fileB', 'r') as f:
+ self.assertEqual(f.read(), b"B" * 10000 + b'Z')
+ with zfile.open('fileA', 'r') as f:
+ self.assertEqual(f.read(), b"A" * 300000 + b'Z')
+ with zfile.open('fileC', 'r') as f:
+ self.assertEqual(f.read(), b"C" * 100000 + b'Z')
+ self.assertIsNone(zfile.testzip())
+
def test_fetch_part(self):
# fetch a range
expected_headers = {'Range': 'bytes=10-20'}
|
Get 416 Client Error on some zip files
remotezip raises `remotezip.RemoteIOError` on some files. For example, http://0x0.st/o5Pa.apk (it's unavailable for now, so I re-uploaded it to https://transfer.sh/get/rndzvr/test.apk).
I also uploaded it to GitHub as a zip archive: [test.zip](https://github.com/gtsystem/python-remotezip/files/10267095/test.zip)
```
$ remotezip http://0x0.st/o5Pa.apk AndroidManifest.xml
Extracting AndroidManifest.xml...
Traceback (most recent call last):
File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 193, in fetch_fun
res, headers = self.request(self.url, range_header, kwargs, self.session)
File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 184, in request
res.raise_for_status()
File "/.../venv/lib/python3.10/site-packages/requests/models.py", line 1021, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 416 Client Error: Requested Range Not Satisfiable for url: http://0x0.st/o5Pa.apk
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/.../venv/bin/remotezip", line 43, in <module>
extract_files(args.url, args.filename, args.dir)
File "/.../venv/bin/remotezip", line 26, in extract_files
zip.extract(fname, path=path)
File "/usr/lib/python3.10/zipfile.py", line 1628, in extract
return self._extract_member(member, path, pwd)
File "/usr/lib/python3.10/zipfile.py", line 1698, in _extract_member
with self.open(member, pwd=pwd) as source, \
File "/usr/lib/python3.10/zipfile.py", line 1530, in open
fheader = zef_file.read(sizeFileHeader)
File "/usr/lib/python3.10/zipfile.py", line 745, in read
data = self._file.read(n)
File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 112, in read
self.buffer = self.fetch_fun((self.buffer.position, self.buffer.position + fetch_size -1), stream=stream)
File "/.../venv/lib/python3.10/site-packages/remotezip.py", line 196, in fetch_fun
raise RemoteIOError(str(e))
remotezip.RemoteIOError: 416 Client Error: Requested Range Not Satisfiable for url: http://0x0.st/o5Pa.apk
```
I also tested this file with [PartialZipBrowser](https://github.com/tihmstar/partialZipBrowser), and it works fine:
```
$ pzb -g AndroidManifest.xml https://0x0.st/o5Pa.apk
Version: 9bfdde2b2456181045f74631683fba491d8bf4f2 - 38
libfragmentzip version: 0.64-aaf6fae83a0aa6f7aae1c94721857076d04a14e8-RELEASE
init pzb: https://0x0.st/o5Pa.apk
init done
getting: AndroidManifest.xml
100% [===================================================================================================>]
download succeeded
```
It looks like a byte-range calculation bug in remotezip.
I also sniffed the traffic generated by remotezip and PartialZipBrowser while downloading this file in mitmproxy; this may help you.
1. remotezip

2. PartialZipBrowser:

|
0.0
|
8d6634cc51e127afb7a9704c364f4fa136b5fd8d
|
[
"test_remotezip.py::TestRemoteZip::test_unordered_fileinfo"
] |
[
"test_remotezip.py::TestRemoteIO::test_simple",
"test_remotezip.py::TestRemoteIO::test_file_access",
"test_remotezip.py::TestPartialBuffer::test_static_seek",
"test_remotezip.py::TestPartialBuffer::test_static",
"test_remotezip.py::TestPartialBuffer::test_static_out_of_bound",
"test_remotezip.py::TestPartialBuffer::test_static_read_no_size",
"test_remotezip.py::TestPartialBuffer::test_stream",
"test_remotezip.py::TestPartialBuffer::test_stream_forward_seek",
"test_remotezip.py::TestRemoteZip::test_big_header",
"test_remotezip.py::TestRemoteZip::test_range_not_supported",
"test_remotezip.py::TestRemoteZip::test_zip64",
"test_remotezip.py::TestRemoteZip::test_interface",
"test_remotezip.py::TestRemoteZip::test_fetch_part",
"test_remotezip.py::TestRemoteZip::test_fetch_ending",
"test_remotezip.py::TestRemoteZip::test_custom_session",
"test_remotezip.py::TestLocalFetcher::test_parse_range_header",
"test_remotezip.py::TestLocalFetcher::test_build_range_header"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_git_commit_hash",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-21 22:01:59+00:00
|
mit
| 2,681
|
|
m3dev__gokart-147
|
diff --git a/gokart/redis_lock.py b/gokart/redis_lock.py
index d371b67..a287fd0 100644
--- a/gokart/redis_lock.py
+++ b/gokart/redis_lock.py
@@ -11,11 +11,11 @@ logger = getLogger(__name__)
class RedisParams(NamedTuple):
- redis_host: str = None
- redis_port: str = None
- redis_timeout: int = None
- redis_key: str = None
- should_redis_lock: bool = 180
+ redis_host: str
+ redis_port: str
+ redis_timeout: int
+ redis_key: str
+ should_redis_lock: bool
class RedisClient:
diff --git a/gokart/target.py b/gokart/target.py
index dc2cad1..c9d890c 100644
--- a/gokart/target.py
+++ b/gokart/target.py
@@ -77,7 +77,7 @@ class SingleFileTarget(TargetOnKart):
self,
target: luigi.target.FileSystemTarget,
processor: FileProcessor,
- redis_params: RedisParams = RedisParams(),
+ redis_params: RedisParams,
) -> None:
self._target = target
self._processor = processor
@@ -115,7 +115,7 @@ class ModelTarget(TargetOnKart):
temporary_directory: str,
load_function,
save_function,
- redis_params: RedisParams = RedisParams(),
+ redis_params: RedisParams,
) -> None:
self._zip_client = make_zip_client(file_path, temporary_directory)
self._temporary_directory = temporary_directory
diff --git a/gokart/task.py b/gokart/task.py
index 4aa99b2..15b3b62 100644
--- a/gokart/task.py
+++ b/gokart/task.py
@@ -61,8 +61,7 @@ class TaskOnKart(luigi.Task):
self._rerun_state = self.rerun
def output(self):
- file_path = self.__module__.replace(".", "/")
- return self.make_target(os.path.join(file_path, f"{type(self).__name__}.pkl"))
+ return self.make_target()
def requires(self):
tasks = self.make_task_instance_dictionary()
@@ -131,8 +130,10 @@ class TaskOnKart(luigi.Task):
return cls(**new_k)
- def make_target(self, relative_file_path: str, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart:
- file_path = os.path.join(self.workspace_directory, relative_file_path)
+ def make_target(self, relative_file_path: str = None, use_unique_id: bool = True, processor: Optional[FileProcessor] = None) -> TargetOnKart:
+ formatted_relative_file_path = relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace(".", "/"),
+ f"{type(self).__name__}.pkl")
+ file_path = os.path.join(self.workspace_directory, formatted_relative_file_path)
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_target(file_path=file_path,
unique_id=unique_id,
@@ -141,8 +142,10 @@ class TaskOnKart(luigi.Task):
redis_port=self.redis_port,
redis_timeout=self.redis_timeout)
- def make_large_data_frame_target(self, relative_file_path: str, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart:
- file_path = os.path.join(self.workspace_directory, relative_file_path)
+ def make_large_data_frame_target(self, relative_file_path: str = None, use_unique_id: bool = True, max_byte=int(2**26)) -> TargetOnKart:
+ formatted_relative_file_path = relative_file_path if relative_file_path is not None else os.path.join(self.__module__.replace(".", "/"),
+ f"{type(self).__name__}.zip")
+ file_path = os.path.join(self.workspace_directory, formatted_relative_file_path)
unique_id = self.make_unique_id() if use_unique_id else None
return gokart.target.make_model_target(file_path=file_path,
temporary_directory=self.local_temporary_directory,
|
m3dev/gokart
|
5a10506e5ef762d384fec1651e9cb56daa276336
|
diff --git a/test/test_task_on_kart.py b/test/test_task_on_kart.py
index 6b5a118..b1f4d10 100644
--- a/test/test_task_on_kart.py
+++ b/test/test_task_on_kart.py
@@ -153,6 +153,12 @@ class TaskTest(unittest.TestCase):
self.assertIsInstance(default_target, SingleFileTarget)
self.assertEqual(f'./resources/test/test_task_on_kart/_DummyTaskD_{task.task_unique_id}.pkl', default_target._target.path)
+ def test_default_large_dataframe_target(self):
+ task = _DummyTaskD()
+ default_large_dataframe_target = task.make_large_data_frame_target()
+ self.assertIsInstance(default_large_dataframe_target, ModelTarget)
+ self.assertEqual(f'./resources/test/test_task_on_kart/_DummyTaskD_{task.task_unique_id}.zip', default_large_dataframe_target._zip_client._file_path)
+
def test_make_target(self):
task = _DummyTask()
target = task.make_target('test.txt')
|
Default path for make_large_data_frame_target
In the same manner as `output()`, it would be great if `make_large_data_frame_target()` could produce a default path.
```
file_path = self.__module__.replace('.', '/')
return self.make_large_data_frame_target(os.path.join(file_path, f'{type(self).__name__}.zip'))
```
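A sketch of how the call site could look once a default path is supported, assuming a task class `MyTask`; the class name and dummy data are made up, and the default-path behaviour is the proposed one, not the current one.

```python
import gokart
import pandas as pd

class MyTask(gokart.TaskOnKart):
    def output(self):
        # No explicit relative path: with the proposed change the target would
        # default to <module path>/MyTask.zip under the workspace directory.
        return self.make_large_data_frame_target()

    def run(self):
        df = pd.DataFrame({"x": [1, 2, 3]})
        self.dump(df)
```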
|
0.0
|
5a10506e5ef762d384fec1651e9cb56daa276336
|
[
"test/test_task_on_kart.py::TaskTest::test_default_large_dataframe_target"
] |
[
"test/test_task_on_kart.py::TaskTest::test_add_cofigureation_evaluation_order",
"test/test_task_on_kart.py::TaskTest::test_add_configuration",
"test/test_task_on_kart.py::TaskTest::test_compare_targets_of_different_tasks",
"test/test_task_on_kart.py::TaskTest::test_complete_when_input_and_output_equal",
"test/test_task_on_kart.py::TaskTest::test_complete_when_modification_time_equals_output",
"test/test_task_on_kart.py::TaskTest::test_complete_with_modified_input",
"test/test_task_on_kart.py::TaskTest::test_complete_with_rerun_flag",
"test/test_task_on_kart.py::TaskTest::test_complete_with_uncompleted_input",
"test/test_task_on_kart.py::TaskTest::test_complete_without_dependency",
"test/test_task_on_kart.py::TaskTest::test_default_requires",
"test/test_task_on_kart.py::TaskTest::test_default_target",
"test/test_task_on_kart.py::TaskTest::test_dump",
"test/test_task_on_kart.py::TaskTest::test_load_data_frame_empty_input",
"test/test_task_on_kart.py::TaskTest::test_load_dictionary_at_once",
"test/test_task_on_kart.py::TaskTest::test_load_generator_with_single_target",
"test/test_task_on_kart.py::TaskTest::test_load_index_only_dataframe",
"test/test_task_on_kart.py::TaskTest::test_load_list_of_list_pandas",
"test/test_task_on_kart.py::TaskTest::test_load_tuple",
"test/test_task_on_kart.py::TaskTest::test_load_with_keyword",
"test/test_task_on_kart.py::TaskTest::test_load_with_single_target",
"test/test_task_on_kart.py::TaskTest::test_make_model_target",
"test/test_task_on_kart.py::TaskTest::test_make_target",
"test/test_task_on_kart.py::TaskTest::test_make_target_with_processor",
"test/test_task_on_kart.py::TaskTest::test_make_target_without_id",
"test/test_task_on_kart.py::TaskTest::test_repr",
"test/test_task_on_kart.py::TaskTest::test_significant_flag",
"test/test_task_on_kart.py::TaskTest::test_use_rerun_with_inherits"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-29 01:30:47+00:00
|
mit
| 3,663
|
|
tern-tools__tern-764
|
diff --git a/tern/analyze/common.py b/tern/analyze/common.py
index df4889a..a9de792 100644
--- a/tern/analyze/common.py
+++ b/tern/analyze/common.py
@@ -31,9 +31,11 @@ logger = logging.getLogger(constants.logger_name)
def get_shell_commands(shell_command_line):
- '''Given a shell command line, get a list of Command objects'''
+ '''Given a shell command line, get a list of Command objects and report on
+ branch statements'''
statements = general.split_command(shell_command_line)
command_list = []
+ branch_report = ''
# traverse the statements, pick out the loop and commands.
for stat in statements:
if 'command' in stat:
@@ -43,7 +45,13 @@ def get_shell_commands(shell_command_line):
for st in loop_stat:
if 'command' in st:
command_list.append(Command(st['command']))
- return command_list
+ elif 'branch' in stat:
+ branch_report = branch_report + '\n'.join(stat['content']) + '\n\n'
+ if branch_report:
+ # add prefix
+ branch_report = '\nNon-deterministic branching statement: \n' + \
+ branch_report
+ return command_list, branch_report
def load_from_cache(layer, redo=False):
@@ -478,7 +486,7 @@ def filter_install_commands(shell_command_line):
3. Return installed command objects, and messages for ignored commands
and unrecognized commands'''
report = ''
- command_list = get_shell_commands(shell_command_line)
+ command_list, branch_report = get_shell_commands(shell_command_line)
for command in command_list:
command_lib.set_command_attrs(command)
ignore_msgs, filter1 = remove_ignored_commands(command_list)
@@ -487,7 +495,8 @@ def filter_install_commands(shell_command_line):
report = report + formats.ignored + ignore_msgs
if unrec_msgs:
report = report + formats.unrecognized + unrec_msgs
-
+ if branch_report:
+ report = report + branch_report
return consolidate_commands(filter2), report
|
tern-tools/tern
|
044dc470ec5be8aacbc085a5ae307c608ff13255
|
diff --git a/tests/test_analyze_common.py b/tests/test_analyze_common.py
index e40445c..82aba50 100644
--- a/tests/test_analyze_common.py
+++ b/tests/test_analyze_common.py
@@ -32,10 +32,18 @@ class TestAnalyzeCommon(unittest.TestCase):
del self.test_dockerfile
def testGetShellCommands(self):
- command = common.get_shell_commands("yum install nfs-utils")
+ command, _ = common.get_shell_commands("yum install nfs-utils")
self.assertEqual(type(command), list)
self.assertEqual(len(command), 1)
self.assertEqual(command[0].options, self.command1.options)
+ # test on branching command
+ branching_script = "if [ -z $var ]; then yum install nfs-utils; fi"
+ branch_command, report = common.get_shell_commands(branching_script)
+ self.assertEqual(type(branch_command), list)
+ # we will ignore branching command, so len should be 0
+ self.assertEqual(len(branch_command), 0)
+ # and the report should not be None
+ self.assertTrue(report)
def testLoadFromCache(self):
'''Given a layer object, populate the given layer in case the cache isn't empty'''
|
Report RUN command statements that are non-deterministic like if and case statements
**Describe the Feature**
Currently, the report notices just say that if, case and for statements are unrecognizable. It would be nice to add notices for non-deterministic branching statements like if and case statements, since the status of a branch can only be known at build time.
**Implementation Changes**
The new shell script parser produces a dictionary that will identify if and case statements. We could just look at this object and extract the if and case statements to create notices for them.
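A minimal sketch of that extraction, assuming the parser yields dictionaries carrying either a `command` key or a `branch` key with a `content` list, as in the patch above; the function name is illustrative.

```python
def split_commands_and_branches(statements):
    """Separate plain commands from branching statements (if/case),
    whose outcome is only known at build time."""
    commands, branches = [], []
    for stat in statements:
        if 'command' in stat:
            commands.append(stat['command'])
        elif 'branch' in stat:
            branches.append('\n'.join(stat['content']))
    report = ''
    if branches:
        report = ('\nNon-deterministic branching statement: \n'
                  + '\n\n'.join(branches) + '\n\n')
    return commands, report
```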
|
0.0
|
044dc470ec5be8aacbc085a5ae307c608ff13255
|
[
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetShellCommands"
] |
[
"tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithDifferentCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testConsolidateCommandsWithSameCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testFilterInstallCommands",
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithInstallFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testGetInstalledPackageNamesWithRemoveFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFilesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadNoticesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testLoadPackagesFromCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithIgnoreFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveIgnoredCommandsWithoutIgnoreFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testRemoveUnrecognizedCommandsWithoutFlag",
"tests/test_analyze_common.py::TestAnalyzeCommon::testSaveToCache",
"tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithPackages",
"tests/test_analyze_common.py::TestAnalyzeCommon::testUpdateMasterListWithoutPackages"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-11 01:09:53+00:00
|
bsd-2-clause
| 5,854
|
|
tox-dev__tox-docker-19
|
diff --git a/README.md b/README.md
index c99702c..efd4f4f 100644
--- a/README.md
+++ b/README.md
@@ -31,17 +31,24 @@ your test suite as it runs, as ordinary environment variables:
POSTGRES_USER=username
POSTGRES_DB=dbname
-## Port Mapping
+## Host and Port Mapping
tox-docker runs docker with the "publish all ports" option. Any port the
container exposes will be made available to your test suite via environment
-variables of the form `<image-basename>_<exposed-port>_<proto>`. For
+variables of the form `<image-basename>_<exposed-port>_<protocol>_PORT`. For
instance, for the postgresql container, there will be an environment
-variable `POSTGRES_5432_TCP` whose value is the ephemeral port number that
-docker has bound the container's port 5432 to.
+variable `POSTGRES_5432_TCP_PORT` whose value is the ephemeral port number
+that docker has bound the container's port 5432 to.
Likewise, exposed UDP ports will have environment variables like
-`TELEGRAF_8092_UDP` whose value is the ephemeral port number that docker has
-bound. NB! Since it's not possible to check whether UDP port is open it's
-just mapping to environment variable without any checks that service up and
-running.
+`TELEGRAF_8092_UDP_PORT` Since it's not possible to check whether UDP port
+is open it's just mapping to environment variable without any checks that
+service up and running.
+
+The host name for each service is also exposed via environment as
+`<image-basename>_HOST`, which is `POSTGRES_HOST` and `TELEGRAF_HOST` for
+the two examples above.
+
+*Deprecation Note:* In older versions of tox-docker, the port was exposed as
+`<image-basename>-<exposed-port>-<protocol>`. This additional environment
+variable is deprecated, but will be supported until tox-docker 2.0.
diff --git a/tox.ini b/tox.ini
index cafee7d..f20bc70 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,11 +1,18 @@
[tox]
-envlist = py27
+envlist = integration,registry
-[testenv]
+[testenv:integration]
docker =
nginx:1.13-alpine
- telegraf:1.8-alpine
+ ksdn117/tcp-udp-test
dockerenv =
ENV_VAR=env-var-value
deps = pytest
commands = py.test [] test_integration.py
+
+[testenv:registry]
+docker = docker.io/library/nginx:1.13-alpine
+dockerenv =
+ ENV_VAR=env-var-value
+deps = pytest
+commands = py.test [] test_registry.py
diff --git a/tox_docker.py b/tox_docker.py
index a120a61..8095735 100644
--- a/tox_docker.py
+++ b/tox_docker.py
@@ -7,6 +7,28 @@ from docker.errors import ImageNotFound
import docker as docker_module
+def escape_env_var(varname):
+ """
+ Convert a string to a form suitable for use as an environment variable.
+
+ The result will be all uppercase, and will have all invalid characters
+ replaced by an underscore.
+
+ The result will match the following regex: [a-zA-Z_][a-zA-Z0-9_]*
+
+ Example:
+ "my.private.registry/cat/image" will become
+ "MY_PRIVATE_REGISTRY_CAT_IMAGE"
+ """
+ varname = list(varname.upper())
+ if not varname[0].isalpha():
+ varname[0] = '_'
+ for i, c in enumerate(varname):
+ if not c.isalnum() and c != '_':
+ varname[i] = '_'
+ return "".join(varname)
+
+
def _newaction(venv, message):
try:
# tox 3.7 and later
@@ -62,20 +84,33 @@ def tox_runtest_pre(venv):
conf._docker_containers.append(container)
container.reload()
+ gateway_ip = container.attrs["NetworkSettings"]["Gateway"] or "0.0.0.0"
for containerport, hostports in container.attrs["NetworkSettings"]["Ports"].items():
- hostport = None
+
for spec in hostports:
if spec["HostIp"] == "0.0.0.0":
hostport = spec["HostPort"]
break
-
- if not hostport:
+ else:
continue
- envvar = "{}_{}".format(
- name.upper(),
- containerport.replace("/", "_").upper(),
- )
+ envvar = escape_env_var("{}_HOST".format(
+ name,
+ ))
+ venv.envconfig.setenv[envvar] = gateway_ip
+
+ envvar = escape_env_var("{}_{}_PORT".format(
+ name,
+ containerport,
+ ))
+ venv.envconfig.setenv[envvar] = hostport
+
+ # TODO: remove in 2.0
+ _, proto = containerport.split("/")
+ envvar = escape_env_var("{}_{}".format(
+ name,
+ containerport,
+ ))
venv.envconfig.setenv[envvar] = hostport
_, proto = containerport.split("/")
@@ -88,7 +123,7 @@ def tox_runtest_pre(venv):
while (time.time() - start) < 30:
try:
sock = socket.create_connection(
- address=("0.0.0.0", int(hostport)),
+ address=(gateway_ip, int(hostport)),
timeout=0.1,
)
except socket.error:
|
tox-dev/tox-docker
|
c571732e0c606a1cde123bf6899a7c246ba2e44e
|
diff --git a/test_integration.py b/test_integration.py
index 2c672fe..4a0be70 100644
--- a/test_integration.py
+++ b/test_integration.py
@@ -1,6 +1,10 @@
import os
import unittest
-import urllib2
+
+try:
+ from urllib.request import urlopen
+except ImportError:
+ from urllib2 import urlopen
class ToxDockerIntegrationTest(unittest.TestCase):
@@ -12,13 +16,30 @@ class ToxDockerIntegrationTest(unittest.TestCase):
def test_it_sets_automatic_env_vars(self):
# the nginx image we use exposes port 80
+ self.assertIn("NGINX_HOST", os.environ)
self.assertIn("NGINX_80_TCP", os.environ)
- # the telegraf image we use exposes UDP port 8092
- self.assertIn("TELEGRAF_8092_UDP", os.environ)
+ self.assertIn("NGINX_80_TCP_PORT", os.environ)
+ self.assertEqual(
+ os.environ["NGINX_80_TCP_PORT"],
+ os.environ["NGINX_80_TCP"],
+ )
+
+ # the test image we use exposes TCP port 1234 and UDP port 5678
+ self.assertIn("KSDN117_TCP_UDP_TEST_1234_TCP", os.environ)
+ self.assertIn("KSDN117_TCP_UDP_TEST_1234_TCP_PORT", os.environ)
+ self.assertEqual(
+ os.environ["KSDN117_TCP_UDP_TEST_1234_TCP_PORT"],
+ os.environ["KSDN117_TCP_UDP_TEST_1234_TCP"],
+ )
+ self.assertIn("KSDN117_TCP_UDP_TEST_5678_UDP_PORT", os.environ)
+ self.assertEqual(
+ os.environ["KSDN117_TCP_UDP_TEST_5678_UDP_PORT"],
+ os.environ["KSDN117_TCP_UDP_TEST_5678_UDP"],
+ )
def test_it_exposes_the_port(self):
# the nginx image we use exposes port 80
- url = "http://127.0.0.1:{port}/".format(port=os.environ["NGINX_80_TCP"])
- response = urllib2.urlopen(url)
+ url = "http://{host}:{port}/".format(host=os.environ["NGINX_HOST"], port=os.environ["NGINX_80_TCP"])
+ response = urlopen(url)
self.assertEqual(200, response.getcode())
- self.assertIn("Thank you for using nginx.", response.read())
+ self.assertIn("Thank you for using nginx.", str(response.read()))
diff --git a/test_registry.py b/test_registry.py
new file mode 100644
index 0000000..4884f36
--- /dev/null
+++ b/test_registry.py
@@ -0,0 +1,18 @@
+import os
+import unittest
+
+from tox_docker import escape_env_var
+
+
+class ToxDockerRegistryTest(unittest.TestCase):
+
+ def test_it_sets_automatic_env_vars(self):
+ # the nginx image we use exposes port 80
+ self.assertIn("DOCKER_IO_LIBRARY_NGINX_HOST", os.environ)
+ self.assertIn("DOCKER_IO_LIBRARY_NGINX_80_TCP", os.environ)
+
+ def test_escape_env_var(self):
+ self.assertEqual(
+ escape_env_var("my.private.registry/cat/image"),
+ "MY_PRIVATE_REGISTRY_CAT_IMAGE",
+ )
|
support for remote docker hosts
Docker itself can run on a remote machine, in which case Docker uses the DOCKER_HOST variable to connect to it. Still, given that no IP address is returned in the environment variables, I suppose that tox-docker would not be able to work in this case.
This is a serious issue, as Docker now fully supports the ssh protocol and a user can easily do `DOCKER_HOST=ssh://root@remote`.
```
File "/Users/ssbarnea/.local/lib/python2.7/site-packages/pluggy/callers.py", line 81, in get_result
_reraise(*ex) # noqa
File "/Users/ssbarnea/.local/lib/python2.7/site-packages/pluggy/callers.py", line 187, in _multicall
res = hook_impl.function(*args)
File "/Users/ssbarnea/os/tox-docker/tox_docker.py", line 94, in tox_runtest_pre
"Never got answer on port {} from {}".format(containerport, name)
Exception: Never got answer on port 8080/tcp from gerritcodereview/gerrit
tox -e py27 3.33s user 2.75s system 5% cpu 1:44.68 total
```
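For reference, a sketch of how a test suite could consume the host and port environment variables introduced by the change above, assuming an nginx container exposing port 80; the variable names follow the README wording in the patch.

```python
import os
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

# tox-docker publishes the container host and each exposed port, e.g.
# NGINX_HOST and NGINX_80_TCP_PORT for an nginx container.
host = os.environ["NGINX_HOST"]
port = os.environ["NGINX_80_TCP_PORT"]
response = urlopen("http://{host}:{port}/".format(host=host, port=port))
assert response.getcode() == 200
```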
|
0.0
|
c571732e0c606a1cde123bf6899a7c246ba2e44e
|
[
"test_registry.py::ToxDockerRegistryTest::test_escape_env_var"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-14 13:57:01+00:00
|
bsd-3-clause
| 6,091
|
|
GSTT-CSC__hazen-363
|
diff --git a/hazenlib/__init__.py b/hazenlib/__init__.py
index dafcf1a..309c585 100644
--- a/hazenlib/__init__.py
+++ b/hazenlib/__init__.py
@@ -112,7 +112,7 @@ import importlib
import inspect
import logging
import sys
-import pprint
+import json
import os
from docopt import docopt
@@ -156,7 +156,6 @@ def init_task(selected_task, files, report, report_dir):
def main():
arguments = docopt(__doc__, version=__version__)
files = get_dicom_files(arguments['<folder>'])
- pp = pprint.PrettyPrinter(indent=4, depth=1, width=1)
# Set common options
log_levels = {
@@ -203,7 +202,8 @@ def main():
task = init_task(selected_task, files, report, report_dir)
result = task.run()
- print(pp.pformat(result))
+ result_string = json.dumps(result, indent=2)
+ print(result_string)
if __name__ == "__main__":
diff --git a/hazenlib/tasks/acr_snr.py b/hazenlib/tasks/acr_snr.py
index 087c3de..4dc21b3 100644
--- a/hazenlib/tasks/acr_snr.py
+++ b/hazenlib/tasks/acr_snr.py
@@ -74,7 +74,7 @@ class ACRSNR(HazenTask):
traceback.print_exc(file=sys.stdout)
- results = {self.key(snr_dcm): snr_results, 'reports': {'images': self.report_files}}
+ results = {self.key(snr_dcm): snr_results}
# only return reports if requested
if self.report:
diff --git a/hazenlib/tasks/uniformity.py b/hazenlib/tasks/uniformity.py
index 4e57e7e..e54d8b1 100644
--- a/hazenlib/tasks/uniformity.py
+++ b/hazenlib/tasks/uniformity.py
@@ -148,5 +148,5 @@ class Uniformity(HazenTask):
fig.savefig(img_path)
self.report_files.append(img_path)
- return {'horizontal': {'IPEM': fractional_uniformity_horizontal},
- 'vertical': {'IPEM': fractional_uniformity_vertical}}
+ return {'horizontal': fractional_uniformity_horizontal,
+ 'vertical': fractional_uniformity_vertical}
|
GSTT-CSC/hazen
|
5d9e4d747ccd1c908ac5cc0bfbbdb60c470e9bfb
|
diff --git a/tests/test_uniformity.py b/tests/test_uniformity.py
index 8fe2611..2fa43ec 100644
--- a/tests/test_uniformity.py
+++ b/tests/test_uniformity.py
@@ -18,8 +18,8 @@ class TestUniformity(unittest.TestCase):
def test_uniformity(self):
results = self.uniformity_task.run()
key = self.uniformity_task.key(self.uniformity_task.data[0])
- horizontal_ipem = results[key]['horizontal']['IPEM']
- vertical_ipem = results[key]['vertical']['IPEM']
+ horizontal_ipem = results[key]['horizontal']
+ vertical_ipem = results[key]['vertical']
print("\ntest_uniformity.py::TestUniformity::test_uniformity")
|
Improve displaying results on terminal
PrettyPrinter could be replaced with `json.dumps()`, which can format and display the result dictionary in a simple, structured way with less configuration.
Currently with PrettyPrinter (more compact):
<img width="759" alt="Screenshot 2023-07-15 at 18 27 16" src="https://github.com/GSTT-CSC/hazen/assets/15593138/f22dad3d-c0b2-4b41-88b1-1a48d52758cf">
With json.dumps (easier to understand):
<img width="686" alt="image" src="https://github.com/GSTT-CSC/hazen/assets/15593138/63af6d46-fddc-4acc-84fc-da5a92c936f5">
|
0.0
|
5d9e4d747ccd1c908ac5cc0bfbbdb60c470e9bfb
|
[
"tests/test_uniformity.py::TestUniformity::test_uniformity",
"tests/test_uniformity.py::TestSagUniformity::test_uniformity",
"tests/test_uniformity.py::TestCorUniformity::test_uniformity"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-07-28 18:22:43+00:00
|
apache-2.0
| 261
|
|
fullflu__pydtr-9
|
diff --git a/README.md b/README.md
index 0210681..76d80b4 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ Pydtr enables you to implement DTR methods easily by using sklearn-based interfa
| Method | Single binary treatment | Multiple treatments | Multinomial treatment | Continuous Treatment |
| ---- | ---- | ---- | ---- | ---- |
-| IqLearnReg <br> (with sklearn) | :white_check_mark: | :white_check_mark: | :white_check_mark: <br>(with ordinal encoded treatment) |
+| IqLearnReg <br> (with sklearn) | :white_check_mark: | :white_check_mark: | :white_check_mark: <br>(with pipeline) |
| IqLearnReg <br> (with statsmodels) | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| GEstimation | WIP | | WIP | WIP |
@@ -25,21 +25,37 @@ When a treatment variable is multinomial and you use a sklearn model as a regres
G-estimation, a famous method of DTR, is now unavailable.
-## Requirement
+## Requirements
-- python (>= 3.6)
-- pip
+- python>=3.6
+- pandas>=1.1.2
+- scikit-learn>=0.23.2
+- numpy>=1.19.2
+- statsmodels>=0.12.0
-## Install
+## Installation
-`pip install pydtr`
+### From pypi
+
+```
+pip install pydtr
+```
+
+### From source
+
+```
+git clone https://github.com/fullflu/pydtr.git
+cd pydtr
+python setup.py install
+```
## Usage
### Iterative Q Learning (IqLearnReg)
You need to import libraries and prepare data.
-```
+
+```python
# import
import numpy as np
import pandas as pd
@@ -59,7 +75,8 @@ df["Y2"] = np.zeros(n)
```
You can use sklearn-based models.
-```
+
+```python
# set model info
model_info = [
{
@@ -89,7 +106,8 @@ opt_action_all_stages = dtr_model.predict_all_stages(df)
```
You can also use statsmodels-based models.
-```
+
+```python
# set model info
model_info = [
{
@@ -160,4 +178,4 @@ If all checkes have passed in pull-requests, I will merge and release them.
## References
-- Chakraborty, Bibhas. *Statistical methods for dynamic treatment regimes.* Springer, 2013.
+- Chakraborty, B, Moodie, EE. *Statistical Methods for Dynamic Treatment Regimes.* Springer, New York, 2013.
diff --git a/src/pydtr/iqlearn/base.py b/src/pydtr/iqlearn/base.py
index 52ca6e7..d08901d 100644
--- a/src/pydtr/iqlearn/base.py
+++ b/src/pydtr/iqlearn/base.py
@@ -6,7 +6,6 @@ from abc import ABCMeta, abstractmethod
import pandas as pd
import numpy as np
from sklearn.utils import resample
-from sklearn.utils.estimator_checks import check_estimator
class IqLearnBase(object):
@@ -103,10 +102,6 @@ class IqLearnBase(object):
size_bs = df.shape[0]
return resample(df, n_samples=size_bs)
- @staticmethod
- def _check_model_type(model):
- assert type(model) == str or check_estimator(model)
-
def fit(self, df: pd.DataFrame):
"""
Fit dtr models
@@ -136,16 +131,15 @@ class IqLearnBase(object):
# fit models using bootstrap
for i in range(self.n_bs):
df_i = self._sample_bs(df)
- print("{}th bootstrap".format(i))
for t in reversed(range(self.n_stages)):
# extract feature and outcome
- X = df[self.model_info[t]["feature"]]
- y = df[self.model_info[t]["outcome"]]
+ X = df_i[self.model_info[t]["feature"]]
+ y = df_i[self.model_info[t]["outcome"]]
if t == self.n_stages - 1:
p_outcome = y.values
else:
- X2 = df[self.model_info[t + 1]["feature"]]
- y2 = df[self.model_info[t + 1]["outcome"]]
+ X2 = df_i[self.model_info[t + 1]["feature"]]
+ y2 = df_i[self.model_info[t + 1]["outcome"]]
p_outcome = self._get_p_outcome(self.model_all[t + 1], X2, y2, t)
# fit model of stage t
self._fit_model(X, p_outcome, t, i)
@@ -172,9 +166,9 @@ class IqLearnBase(object):
def get_params(self) -> pd.DataFrame:
# get estimated parameters
params = pd.DataFrame()
- for t, m in enumerate(self.models[:-1]):
+ for t in reversed(range(self.n_stages)):
if type(self.model_info[t]["model"]) == str:
- tmp_df = pd.melt(pd.DataFrame([i.params for i in m]))
+ tmp_df = pd.melt(pd.DataFrame([i.params for i in self.models[t]]))
tmp_df["stage"] = t
params = pd.concat([params, tmp_df])
return params
diff --git a/src/pydtr/version.py b/src/pydtr/version.py
index f102a9c..3b93d0b 100644
--- a/src/pydtr/version.py
+++ b/src/pydtr/version.py
@@ -1,1 +1,1 @@
-__version__ = "0.0.1"
+__version__ = "0.0.2"
|
fullflu/pydtr
|
13d59092b7e3d289945611b27951e3011a9d3beb
|
diff --git a/tests/test_iqlearn_sklearn_predict.py b/tests/test_iqlearn_sklearn_predict.py
index faec86e..e8b822c 100644
--- a/tests/test_iqlearn_sklearn_predict.py
+++ b/tests/test_iqlearn_sklearn_predict.py
@@ -114,11 +114,11 @@ def test_iqlearn_regwrapper_rule():
assert len(dtr_model.models[0]) == 2
-def test_iqlearn_regwrapper_rf():
+def test_iqlearn_rf():
# setup params
n = 10
thres = int(n / 2)
- # sample rule base models
+ # rf models
model1 = RandomForestRegressor()
model2 = RandomForestRegressor()
# sample dataframe
@@ -143,7 +143,7 @@ def test_iqlearn_regwrapper_rf():
"outcome": "Y2"
}
]
- # fit model (dummy)
+ # fit model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info
@@ -163,7 +163,7 @@ def test_iqlearn_regwrapper_rf():
a2 = action_all.query("stage == 1")[["A2", "val"]].reset_index(drop=True)
assert_frame_equal(action_1, a1)
assert_frame_equal(action_2, a2)
- # fit bootstrap model (dummy)
+ # fit bootstrap model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info,
@@ -177,7 +177,7 @@ def test_iqlearn_regwrapper_rf_multiple_actions():
# setup params
n = 10
thres = int(n / 2)
- # sample rule base models
+ # rf models
model1 = RandomForestRegressor()
model2 = RandomForestRegressor()
# sample dataframe
@@ -202,7 +202,7 @@ def test_iqlearn_regwrapper_rf_multiple_actions():
"outcome": "Y2"
}
]
- # fit model (dummy)
+ # fit model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info
@@ -222,7 +222,7 @@ def test_iqlearn_regwrapper_rf_multiple_actions():
a2 = action_all.query("stage == 1")[["A1", "A2", "val"]].reset_index(drop=True)
assert_frame_equal(action_1, a1)
assert_frame_equal(action_2, a2)
- # fit bootstrap model (dummy)
+ # fit bootstrap model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info,
@@ -236,7 +236,7 @@ def test_iqlearn_regwrapper_rf_ordinalencoder():
# setup params
n = 30
thres = int(n / 2)
- # sample rule base models
+ # rf models
model1 = RandomForestRegressor()
model2 = RandomForestRegressor()
# sample dataframe
@@ -261,7 +261,7 @@ def test_iqlearn_regwrapper_rf_ordinalencoder():
"outcome": "Y2"
}
]
- # fit model (dummy)
+ # fit model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info
@@ -281,7 +281,7 @@ def test_iqlearn_regwrapper_rf_ordinalencoder():
a2 = action_all.query("stage == 1")[["A2", "val"]].reset_index(drop=True)
assert_frame_equal(action_1, a1)
assert_frame_equal(action_2, a2)
- # fit bootstrap model (dummy)
+ # fit bootstrap model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info,
diff --git a/tests/test_iqlearn_sm_predict.py b/tests/test_iqlearn_sm_predict.py
index 448ab14..64dfc73 100644
--- a/tests/test_iqlearn_sm_predict.py
+++ b/tests/test_iqlearn_sm_predict.py
@@ -10,7 +10,7 @@ def test_iqlearn_sm():
# setup params
n = 10
thres = int(n / 2)
- # sample rule base models
+ # statsmodels
model1 = "p_outcome ~ L1 * A1"
model2 = "p_outcome ~ L1 + A1 + Y1 * A2"
# sample dataframe
@@ -35,7 +35,7 @@ def test_iqlearn_sm():
"outcome": "Y2"
}
]
- # fit model (dummy)
+ # fit model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info
@@ -55,7 +55,7 @@ def test_iqlearn_sm():
a2 = action_all.query("stage == 1")[["A2", "val"]].reset_index(drop=True)
assert_frame_equal(action_1, a1)
assert_frame_equal(action_2, a2)
- # fit bootstrap model (dummy)
+ # fit bootstrap model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info,
@@ -69,7 +69,7 @@ def test_iqlearn_sm_multiple_actions():
# setup params
n = 10
thres = int(n / 2)
- # sample rule base models
+ # statsmodels
model1 = "p_outcome ~ L1 * A1"
model2 = "p_outcome ~ L1 + A1 + Y1 * A2"
# sample dataframe
@@ -94,7 +94,7 @@ def test_iqlearn_sm_multiple_actions():
"outcome": "Y2"
}
]
- # fit model (dummy)
+ # fit model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info
@@ -114,7 +114,7 @@ def test_iqlearn_sm_multiple_actions():
a2 = action_all.query("stage == 1")[["A1", "A2", "val"]].reset_index(drop=True)
assert_frame_equal(action_1, a1)
assert_frame_equal(action_2, a2)
- # fit bootstrap model (dummy)
+ # fit bootstrap model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info,
@@ -128,7 +128,7 @@ def test_iqlearn_sm_multinomial_action():
# setup params
n = 30
thres = int(n / 2)
- # sample rule base models
+ # statsmodels
model1 = "p_outcome ~ L1 * C(A1)"
model2 = "p_outcome ~ L1 + A1 + Y1 * C(A2)"
# sample dataframe
@@ -153,7 +153,7 @@ def test_iqlearn_sm_multinomial_action():
"outcome": "Y2"
}
]
- # fit model (dummy)
+ # fit model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info
@@ -173,7 +173,7 @@ def test_iqlearn_sm_multinomial_action():
a2 = action_all.query("stage == 1")[["A2", "val"]].reset_index(drop=True)
assert_frame_equal(action_1, a1)
assert_frame_equal(action_2, a2)
- # fit bootstrap model (dummy)
+ # fit bootstrap model
dtr_model = IqLearnReg(
n_stages=2,
model_info=model_info,
@@ -181,3 +181,48 @@ def test_iqlearn_sm_multinomial_action():
)
dtr_model.fit(df)
assert len(dtr_model.models[0]) == 2
+
+
+def test_iqlearn_sm_get_params():
+ # setup params
+ n = 300
+ thres = int(n / 2)
+ # statsmodels
+ model1 = "p_outcome ~ L1 * A1"
+ model2 = "p_outcome ~ L1 + A1 + Y1 * A2"
+ # sample dataframe
+ df = pd.DataFrame()
+ df["L1"] = np.random.normal(0, size=n)
+ df["A1"] = [0, 1] * int(n / 2)
+ df["A2"] = [0] * int(n / 2) + [1] * int(n / 2)
+ df["Y1"] = df["L1"] * df["A1"] + np.random.normal(0, scale=5, size=n)
+ df["Y2"] = df["A1"] + df["Y1"] * df["A2"] + np.random.normal(0, scale=5, size=n)
+ # set model info
+ model_info = [
+ {
+ "model": model1,
+ "action_dict": {"A1": [0, 1]},
+ "feature": ["L1", "A1"],
+ "outcome": "Y1"
+ },
+ {
+ "model": model2,
+ "action_dict": {"A1": [0, 1], "A2": [0, 1]},
+ "feature": ["L1", "A1", "Y1", "A2"],
+ "outcome": "Y2"
+ }
+ ]
+ # fit bootstrap model
+ dtr_model = IqLearnReg(
+ n_stages=2,
+ model_info=model_info,
+ n_bs=10
+ )
+ dtr_model.fit(df)
+ # get params
+ params = dtr_model.get_params()
+ l1_unique_shape = params.query("stage == 0 & variable == 'L1'")["value"].unique().shape[0]
+ a1_unique_shape = params.query("stage == 0 & variable == 'A1'")["value"].unique().shape[0]
+ a2_unique_shape = params.query("stage == 1 & variable == 'A2'")["value"].unique().shape[0]
+ assert l1_unique_shape != 1 or a1_unique_shape != 1 or a2_unique_shape != 1
+ assert len(dtr_model.models[0]) == 10
|
fix get_params
# WHY
There is a bug in the `get_params` method (`self.models` cannot be sliced):
```
for t, m in enumerate(self.models[:-1]): <-
```
# TODO
- [ ] fix the function
- [ ] add tests of the function
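A sketch of the corrected iteration, mirroring the fix in the patch above: `self.models` maps a stage index to the list of fitted bootstrap models, so it is walked by index rather than sliced. The standalone `collect_params` name is illustrative.

```python
import pandas as pd

def collect_params(models, model_info, n_stages):
    """Gather fitted statsmodels parameters across stages and bootstrap samples."""
    params = pd.DataFrame()
    for t in reversed(range(n_stages)):
        if isinstance(model_info[t]["model"], str):  # statsmodels formula models only
            # One fitted result per bootstrap sample at stage t
            tmp_df = pd.melt(pd.DataFrame([m.params for m in models[t]]))
            tmp_df["stage"] = t
            params = pd.concat([params, tmp_df])
    return params
```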
|
0.0
|
13d59092b7e3d289945611b27951e3011a9d3beb
|
[
"tests/test_iqlearn_sm_predict.py::test_iqlearn_sm_get_params"
] |
[
"tests/test_iqlearn_sklearn_predict.py::test_iqlearn_regwrapper_rule",
"tests/test_iqlearn_sklearn_predict.py::test_iqlearn_rf",
"tests/test_iqlearn_sklearn_predict.py::test_iqlearn_regwrapper_rf_multiple_actions",
"tests/test_iqlearn_sklearn_predict.py::test_iqlearn_regwrapper_rf_ordinalencoder",
"tests/test_iqlearn_sm_predict.py::test_iqlearn_sm",
"tests/test_iqlearn_sm_predict.py::test_iqlearn_sm_multiple_actions",
"tests/test_iqlearn_sm_predict.py::test_iqlearn_sm_multinomial_action"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-24 10:42:12+00:00
|
bsd-3-clause
| 2,431
|
|
jupyter-server__jupyter_server-574
|
diff --git a/jupyter_server/services/contents/filemanager.py b/jupyter_server/services/contents/filemanager.py
index 376a8db62..54e2dca38 100644
--- a/jupyter_server/services/contents/filemanager.py
+++ b/jupyter_server/services/contents/filemanager.py
@@ -119,6 +119,16 @@ class FileContentsManager(FileManagerMixin, ContentsManager):
deleting files really deletes them.""",
)
+ always_delete_dir = Bool(
+ False,
+ config=True,
+ help="""If True, deleting a non-empty directory will always be allowed.
+ WARNING this may result in files being permanently removed; e.g. on Windows,
+ if the data size is too big for the trash/recycle bin the directory will be permanently
+ deleted. If False (default), the non-empty directory will be sent to the trash only
+ if safe. And if ``delete_to_trash`` is True, the directory won't be deleted.""",
+ )
+
@default("files_handler_class")
def _files_handler_class_default(self):
return AuthenticatedFileHandler
@@ -331,7 +341,10 @@ class FileContentsManager(FileManagerMixin, ContentsManager):
if content:
content, format = self._read_file(os_path, format)
if model["mimetype"] is None:
- default_mime = {"text": "text/plain", "base64": "application/octet-stream"}[format]
+ default_mime = {
+ "text": "text/plain",
+ "base64": "application/octet-stream",
+ }[format]
model["mimetype"] = default_mime
model.update(
@@ -391,7 +404,9 @@ class FileContentsManager(FileManagerMixin, ContentsManager):
if os.path.isdir(os_path):
if type not in (None, "directory"):
raise web.HTTPError(
- 400, u"%s is a directory, not a %s" % (path, type), reason="bad type"
+ 400,
+ u"%s is a directory, not a %s" % (path, type),
+ reason="bad type",
)
model = self._dir_model(path, content=content)
elif type == "notebook" or (type is None and path.endswith(".ipynb")):
@@ -494,7 +509,7 @@ class FileContentsManager(FileManagerMixin, ContentsManager):
return False
if self.delete_to_trash:
- if sys.platform == "win32" and is_non_empty_dir(os_path):
+ if not self.always_delete_dir and sys.platform == "win32" and is_non_empty_dir(os_path):
# send2trash can really delete files on Windows, so disallow
# deleting non-empty files. See Github issue 3631.
raise web.HTTPError(400, u"Directory %s not empty" % os_path)
@@ -507,12 +522,13 @@ class FileContentsManager(FileManagerMixin, ContentsManager):
return
else:
self.log.warning(
- "Skipping trash for %s, on different device " "to home directory", os_path
+ "Skipping trash for %s, on different device " "to home directory",
+ os_path,
)
if os.path.isdir(os_path):
# Don't permanently delete non-empty directories.
- if is_non_empty_dir(os_path):
+ if not self.always_delete_dir and is_non_empty_dir(os_path):
raise web.HTTPError(400, u"Directory %s not empty" % os_path)
self.log.debug("Removing directory %s", os_path)
with self.perm_to_403():
@@ -649,7 +665,10 @@ class AsyncFileContentsManager(FileContentsManager, AsyncFileManagerMixin, Async
if content:
content, format = await self._read_file(os_path, format)
if model["mimetype"] is None:
- default_mime = {"text": "text/plain", "base64": "application/octet-stream"}[format]
+ default_mime = {
+ "text": "text/plain",
+ "base64": "application/octet-stream",
+ }[format]
model["mimetype"] = default_mime
model.update(
@@ -709,7 +728,9 @@ class AsyncFileContentsManager(FileContentsManager, AsyncFileManagerMixin, Async
if os.path.isdir(os_path):
if type not in (None, "directory"):
raise web.HTTPError(
- 400, u"%s is a directory, not a %s" % (path, type), reason="bad type"
+ 400,
+ u"%s is a directory, not a %s" % (path, type),
+ reason="bad type",
)
model = await self._dir_model(path, content=content)
elif type == "notebook" or (type is None and path.endswith(".ipynb")):
@@ -813,7 +834,11 @@ class AsyncFileContentsManager(FileContentsManager, AsyncFileManagerMixin, Async
return False
if self.delete_to_trash:
- if sys.platform == "win32" and await is_non_empty_dir(os_path):
+ if (
+ not self.always_delete_dir
+ and sys.platform == "win32"
+ and await is_non_empty_dir(os_path)
+ ):
# send2trash can really delete files on Windows, so disallow
# deleting non-empty files. See Github issue 3631.
raise web.HTTPError(400, u"Directory %s not empty" % os_path)
@@ -826,12 +851,13 @@ class AsyncFileContentsManager(FileContentsManager, AsyncFileManagerMixin, Async
return
else:
self.log.warning(
- "Skipping trash for %s, on different device " "to home directory", os_path
+ "Skipping trash for %s, on different device " "to home directory",
+ os_path,
)
if os.path.isdir(os_path):
# Don't permanently delete non-empty directories.
- if await is_non_empty_dir(os_path):
+ if not self.always_delete_dir and await is_non_empty_dir(os_path):
raise web.HTTPError(400, u"Directory %s not empty" % os_path)
self.log.debug("Removing directory %s", os_path)
with self.perm_to_403():
|
jupyter-server/jupyter_server
|
90f619cf11eec842a27eec25692f9d65adccf169
|
diff --git a/jupyter_server/tests/services/contents/test_manager.py b/jupyter_server/tests/services/contents/test_manager.py
index 9063d2249..386d3ed60 100644
--- a/jupyter_server/tests/services/contents/test_manager.py
+++ b/jupyter_server/tests/services/contents/test_manager.py
@@ -513,6 +513,47 @@ async def test_delete(jp_contents_manager):
await ensure_async(cm.get(path))
[email protected](
+ "delete_to_trash, always_delete, error",
+ (
+ [True, True, False],
+ # on linux test folder may not be on home folder drive
+ # => if this is the case, _check_trash will be False
+ [True, False, None],
+ [False, True, False],
+ [False, False, True],
+ ),
+)
+async def test_delete_non_empty_folder(delete_to_trash, always_delete, error, jp_contents_manager):
+ cm = jp_contents_manager
+ cm.delete_to_trash = delete_to_trash
+ cm.always_delete_dir = always_delete
+
+ dir = "to_delete"
+
+ await make_populated_dir(cm, dir)
+ await check_populated_dir_files(cm, dir)
+
+ if error is None:
+ error = False
+ if sys.platform == "win32":
+ error = True
+ elif sys.platform == "linux":
+ file_dev = os.stat(cm.root_dir).st_dev
+ home_dev = os.stat(os.path.expanduser("~")).st_dev
+ error = file_dev != home_dev
+
+ if error:
+ with pytest.raises(
+ HTTPError,
+ match=r"HTTP 400: Bad Request \(Directory .*?to_delete not empty\)",
+ ):
+ await ensure_async(cm.delete_file(dir))
+ else:
+ await ensure_async(cm.delete_file(dir))
+ assert cm.dir_exists(dir) == False
+
+
async def test_rename(jp_contents_manager):
cm = jp_contents_manager
# Create a new notebook
|
Allow non-empty directory deletion for windows through settings
Hey, would it be OK to allow non-empty directory deletion for Windows through settings?
https://github.com/jupyter-server/jupyter_server/blob/f914126d173dde7d2b2f6a6d474648060c823419/jupyter_server/services/contents/filemanager.py#L492
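A sketch of how such a setting could be switched on once exposed as a configurable, using the `always_delete_dir` trait added in the patch above; the snippet would live in the usual `jupyter_server_config.py`.

```python
# jupyter_server_config.py
c.FileContentsManager.always_delete_dir = True  # allow deleting non-empty directories
c.FileContentsManager.delete_to_trash = True    # still prefer the trash/recycle bin when safe
```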
|
0.0
|
90f619cf11eec842a27eec25692f9d65adccf169
|
[
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager0-False-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager1-False-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager2-False-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager3-False-True-False]"
] |
[
"jupyter_server/tests/services/contents/test_manager.py::test_root_dir[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_root_dir[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_missing_root_dir[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_missing_root_dir[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_invalid_root_dir[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_invalid_root_dir[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_get_os_path[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_get_os_path[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_checkpoint_subdir[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_checkpoint_subdir[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_bad_symlink[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_bad_symlink[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_recursive_symlink[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_recursive_symlink[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_good_symlink[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_good_symlink[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_escape_root[FileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_escape_root[AsyncFileContentsManager]",
"jupyter_server/tests/services/contents/test_manager.py::test_new_untitled[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_new_untitled[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_new_untitled[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_new_untitled[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_modified_date[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_modified_date[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_modified_date[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_modified_date[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_get[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_get[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_get[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_get[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_update[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_update[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_update[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_update[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_save[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_save[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_save[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_save[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager0-True-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager0-True-False-None]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager0-False-False-True]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager1-True-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager1-True-False-None]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager1-False-False-True]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager2-True-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager2-True-False-None]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager2-False-False-True]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager3-True-True-False]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager3-True-False-None]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_non_empty_folder[jp_contents_manager3-False-False-True]",
"jupyter_server/tests/services/contents/test_manager.py::test_rename[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_rename[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_rename[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_rename[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_root[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_root[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_root[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_delete_root[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_copy[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_copy[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_copy[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_copy[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_mark_trusted_cells[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_mark_trusted_cells[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_mark_trusted_cells[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_mark_trusted_cells[jp_contents_manager3]",
"jupyter_server/tests/services/contents/test_manager.py::test_check_and_sign[jp_contents_manager0]",
"jupyter_server/tests/services/contents/test_manager.py::test_check_and_sign[jp_contents_manager1]",
"jupyter_server/tests/services/contents/test_manager.py::test_check_and_sign[jp_contents_manager2]",
"jupyter_server/tests/services/contents/test_manager.py::test_check_and_sign[jp_contents_manager3]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-08-20 09:57:24+00:00
|
bsd-3-clause
| 3,360
|
|
simonw__sqlite-utils-142
|
diff --git a/docs/python-api.rst b/docs/python-api.rst
index 9697fd3..5860397 100644
--- a/docs/python-api.rst
+++ b/docs/python-api.rst
@@ -410,7 +410,7 @@ Use it like this:
"is_good_dog": True,
}], pk="id", column_order=("id", "twitter", "name"))
-The column types used in the ``CREATE TABLE`` statement are automatically derived from the types of data in that first batch of rows. Any additional or missing columns in subsequent batches will be ignored.
+The column types used in the ``CREATE TABLE`` statement are automatically derived from the types of data in that first batch of rows. Any additional columns in subsequent batches will cause a ``sqlite3.OperationalError`` exception to be raised unless the ``alter=True`` argument is supplied, in which case the new columns will be created.
The function can accept an iterator or generator of rows and will commit them according to the batch size. The default batch size is 100, but you can specify a different size using the ``batch_size`` parameter:
diff --git a/sqlite_utils/db.py b/sqlite_utils/db.py
index a8791c3..75599f6 100644
--- a/sqlite_utils/db.py
+++ b/sqlite_utils/db.py
@@ -1074,6 +1074,14 @@ class Table(Queryable):
all_columns = list(sorted(all_columns))
if hash_id:
all_columns.insert(0, hash_id)
+ else:
+ all_columns += [
+ column
+ for record in chunk
+ for column in record
+ if column not in all_columns
+ ]
+
validate_column_names(all_columns)
first = False
# values is the list of insert data that is passed to the
|
simonw/sqlite-utils
|
ea87c2b943fdd162c42a900ac0aea5ecc2f4b9d9
|
diff --git a/tests/test_create.py b/tests/test_create.py
index a84eb8d..fc8edc0 100644
--- a/tests/test_create.py
+++ b/tests/test_create.py
@@ -707,13 +707,24 @@ def test_insert_thousands_using_generator(fresh_db):
assert 10000 == fresh_db["test"].count
-def test_insert_thousands_ignores_extra_columns_after_first_100(fresh_db):
+def test_insert_thousands_raises_exception_wtih_extra_columns_after_first_100(fresh_db):
+ # https://github.com/simonw/sqlite-utils/issues/139
+ with pytest.raises(Exception, match="table test has no column named extra"):
+ fresh_db["test"].insert_all(
+ [{"i": i, "word": "word_{}".format(i)} for i in range(100)]
+ + [{"i": 101, "extra": "This extra column should cause an exception"}],
+ )
+
+
+def test_insert_thousands_adds_extra_columns_after_first_100_with_alter(fresh_db):
+ # https://github.com/simonw/sqlite-utils/issues/139
fresh_db["test"].insert_all(
[{"i": i, "word": "word_{}".format(i)} for i in range(100)]
- + [{"i": 101, "extra": "This extra column should cause an exception"}]
+ + [{"i": 101, "extra": "Should trigger ALTER"}],
+ alter=True,
)
rows = fresh_db.execute_returning_dicts("select * from test where i = 101")
- assert [{"i": 101, "word": None}] == rows
+ assert [{"i": 101, "word": None, "extra": "Should trigger ALTER"}] == rows
def test_insert_ignore(fresh_db):
|
insert_all(..., alter=True) should work for new columns introduced after the first 100 records
Is there a way to make `.insert_all()` work properly when new columns are introduced outside the first 100 records (with or without the `alter=True` argument)?
I'm using `.insert_all()` to bulk insert ~3-4k records at a time, and it is common for later records to introduce new columns. However, if new columns are introduced after the first 100 records, `sqlite_utils` doesn't even raise the `OperationalError: table ... has no column named ...` exception; it just silently drops the extra data and moves on.
It took me a while to find this little snippet in the [documentation for `.insert_all()`](https://sqlite-utils.readthedocs.io/en/stable/python-api.html#bulk-inserts) (it's not mentioned under [Adding columns automatically on insert/update](https://sqlite-utils.readthedocs.io/en/stable/python-api.html#bulk-inserts)):
> The column types used in the CREATE TABLE statement are automatically derived from the types of data in that first batch of rows. **_Any additional or missing columns in subsequent batches will be ignored._**
I tried changing the `batch_size` argument to the total number of records, but it seems only to affect the number of rows that are committed at a time, and has no influence on this problem.
Is there a way around this that you would suggest? It seems like it should raise an exception at least.
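For illustration, a minimal sketch of the behaviour discussed above (assumes `sqlite-utils` with the change from this patch; the `extra` column only appears after the first batch of 100 rows):
```python
import sqlite_utils

db = sqlite_utils.Database(memory=True)
rows = [{"i": i, "word": "word_{}".format(i)} for i in range(100)]
rows.append({"i": 101, "extra": "only present after the first 100 rows"})

# Without alter=True this now raises sqlite3.OperationalError
# ("table test has no column named extra"); with alter=True the new
# column is added via ALTER TABLE and the row is inserted.
db["test"].insert_all(rows, alter=True)
print(db.execute_returning_dicts("select * from test where i = 101"))
```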
|
0.0
|
ea87c2b943fdd162c42a900ac0aea5ecc2f4b9d9
|
[
"tests/test_create.py::test_insert_thousands_raises_exception_wtih_extra_columns_after_first_100",
"tests/test_create.py::test_insert_thousands_adds_extra_columns_after_first_100_with_alter"
] |
[
"tests/test_create.py::test_create_table",
"tests/test_create.py::test_create_table_compound_primary_key",
"tests/test_create.py::test_create_table_with_bad_defaults",
"tests/test_create.py::test_create_table_with_invalid_column_characters",
"tests/test_create.py::test_create_table_with_defaults",
"tests/test_create.py::test_create_table_with_bad_not_null",
"tests/test_create.py::test_create_table_with_not_null",
"tests/test_create.py::test_create_table_from_example[example0-expected_columns0]",
"tests/test_create.py::test_create_table_from_example[example1-expected_columns1]",
"tests/test_create.py::test_create_table_from_example[example2-expected_columns2]",
"tests/test_create.py::test_create_table_from_example[example3-expected_columns3]",
"tests/test_create.py::test_create_table_from_example[example4-expected_columns4]",
"tests/test_create.py::test_create_table_from_example[example5-expected_columns5]",
"tests/test_create.py::test_create_table_from_example_with_compound_primary_keys",
"tests/test_create.py::test_create_table_with_custom_columns[insert]",
"tests/test_create.py::test_create_table_with_custom_columns[upsert]",
"tests/test_create.py::test_create_table_with_custom_columns[insert_all]",
"tests/test_create.py::test_create_table_with_custom_columns[upsert_all]",
"tests/test_create.py::test_create_table_column_order[True]",
"tests/test_create.py::test_create_table_column_order[False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification0-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification1-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification2-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification3-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification4-NoObviousTable]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification5-AssertionError]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification6-AlterError]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[True-foreign_key_specification7-AssertionError]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification0-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification1-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification2-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification3-False]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification4-NoObviousTable]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification5-AssertionError]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification6-AlterError]",
"tests/test_create.py::test_create_table_works_for_m2m_with_only_foreign_keys[False-foreign_key_specification7-AssertionError]",
"tests/test_create.py::test_create_error_if_invalid_foreign_keys",
"tests/test_create.py::test_add_column[nickname-str-None-CREATE",
"tests/test_create.py::test_add_column[dob-date-None-CREATE",
"tests/test_create.py::test_add_column[age-int-None-CREATE",
"tests/test_create.py::test_add_column[weight-float-None-CREATE",
"tests/test_create.py::test_add_column[text-TEXT-None-CREATE",
"tests/test_create.py::test_add_column[integer-INTEGER-None-CREATE",
"tests/test_create.py::test_add_column[float-FLOAT-None-CREATE",
"tests/test_create.py::test_add_column[blob-blob-None-CREATE",
"tests/test_create.py::test_add_column[default_str-None-None-CREATE",
"tests/test_create.py::test_add_column[nickname-str--CREATE",
"tests/test_create.py::test_add_column[nickname-str-dawg's",
"tests/test_create.py::test_add_foreign_key",
"tests/test_create.py::test_add_foreign_key_error_if_column_does_not_exist",
"tests/test_create.py::test_add_foreign_key_error_if_other_table_does_not_exist",
"tests/test_create.py::test_add_foreign_key_error_if_already_exists",
"tests/test_create.py::test_add_foreign_keys",
"tests/test_create.py::test_add_column_foreign_key",
"tests/test_create.py::test_add_foreign_key_guess_table",
"tests/test_create.py::test_index_foreign_keys",
"tests/test_create.py::test_insert_row_alter_table[True-extra_data0-expected_new_columns0]",
"tests/test_create.py::test_insert_row_alter_table[True-extra_data1-expected_new_columns1]",
"tests/test_create.py::test_insert_row_alter_table[True-extra_data2-expected_new_columns2]",
"tests/test_create.py::test_insert_row_alter_table[False-extra_data0-expected_new_columns0]",
"tests/test_create.py::test_insert_row_alter_table[False-extra_data1-expected_new_columns1]",
"tests/test_create.py::test_insert_row_alter_table[False-extra_data2-expected_new_columns2]",
"tests/test_create.py::test_insert_row_alter_table_invalid_column_characters",
"tests/test_create.py::test_insert_replace_rows_alter_table[True]",
"tests/test_create.py::test_insert_replace_rows_alter_table[False]",
"tests/test_create.py::test_bulk_insert_more_than_999_values",
"tests/test_create.py::test_error_if_more_than_999_columns[900-False]",
"tests/test_create.py::test_error_if_more_than_999_columns[999-False]",
"tests/test_create.py::test_error_if_more_than_999_columns[1000-True]",
"tests/test_create.py::test_create_index[columns0-None-expected_index0]",
"tests/test_create.py::test_create_index[columns1-None-expected_index1]",
"tests/test_create.py::test_create_index[columns2-age_index-expected_index2]",
"tests/test_create.py::test_create_index_unique",
"tests/test_create.py::test_create_index_if_not_exists",
"tests/test_create.py::test_insert_dictionaries_and_lists_as_json[data_structure0]",
"tests/test_create.py::test_insert_dictionaries_and_lists_as_json[data_structure1]",
"tests/test_create.py::test_insert_dictionaries_and_lists_as_json[data_structure2]",
"tests/test_create.py::test_insert_dictionaries_and_lists_as_json[data_structure3]",
"tests/test_create.py::test_insert_dictionaries_and_lists_as_json[data_structure4]",
"tests/test_create.py::test_insert_dictionaries_and_lists_as_json[data_structure5]",
"tests/test_create.py::test_insert_uuid",
"tests/test_create.py::test_insert_memoryview",
"tests/test_create.py::test_insert_thousands_using_generator",
"tests/test_create.py::test_insert_ignore",
"tests/test_create.py::test_insert_hash_id",
"tests/test_create.py::test_vacuum",
"tests/test_create.py::test_works_with_pathlib_path",
"tests/test_create.py::test_cannot_provide_both_filename_and_memory",
"tests/test_create.py::test_creates_id_column",
"tests/test_create.py::test_drop",
"tests/test_create.py::test_drop_view",
"tests/test_create.py::test_insert_all_empty_list",
"tests/test_create.py::test_create_with_a_null_column",
"tests/test_create.py::test_create_with_nested_bytes"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-08-28 22:22:57+00:00
|
apache-2.0
| 5,501
|
|
kedro-org__kedro-3272
|
diff --git a/RELEASE.md b/RELEASE.md
index 90b8adf1..33dc1c28 100644
--- a/RELEASE.md
+++ b/RELEASE.md
@@ -6,6 +6,7 @@
* The new spaceflights starters, `spaceflights-pandas`, `spaceflights-pandas-viz`, `spaceflights-pyspark`, and `spaceflights-pyspark-viz` can be used with the `kedro new` command with the `--starter` flag.
* Added the `--conf-source` option to `%reload_kedro`, allowing users to specify a source for project configuration.
* Added the functionality to choose a merging strategy for config files loaded with `OmegaConfigLoader`.
+* Modified the mechanism of importing datasets, raise more explicit error when dependencies are missing.
## Bug fixes and other changes
diff --git a/kedro/io/core.py b/kedro/io/core.py
index e620d15f..f605c272 100644
--- a/kedro/io/core.py
+++ b/kedro/io/core.py
@@ -376,14 +376,14 @@ def parse_dataset_definition(
if "type" not in config:
raise DatasetError("'type' is missing from dataset catalog configuration")
- class_obj = config.pop("type")
- if isinstance(class_obj, str):
- if len(class_obj.strip(".")) != len(class_obj):
+ dataset_type = config.pop("type")
+ if isinstance(dataset_type, str):
+ if len(dataset_type.strip(".")) != len(dataset_type):
raise DatasetError(
"'type' class path does not support relative "
"paths or paths ending with a dot."
)
- class_paths = (prefix + class_obj for prefix in _DEFAULT_PACKAGES)
+ class_paths = (prefix + dataset_type for prefix in _DEFAULT_PACKAGES)
for class_path in class_paths:
tmp = _load_obj(class_path)
@@ -391,10 +391,7 @@ def parse_dataset_definition(
class_obj = tmp
break
else:
- raise DatasetError(
- f"Class '{class_obj}' not found or one of its dependencies "
- f"has not been installed."
- )
+ raise DatasetError(f"Class '{dataset_type}' not found, is this a typo?")
if not issubclass(class_obj, AbstractDataset):
raise DatasetError(
@@ -422,8 +419,9 @@ def parse_dataset_definition(
return class_obj, config
-def _load_obj(class_path: str) -> object | None:
+def _load_obj(class_path: str) -> Any | None:
mod_path, _, class_name = class_path.rpartition(".")
+ # Check if the module exists
try:
available_classes = load_obj(f"{mod_path}.__all__")
# ModuleNotFoundError: When `load_obj` can't find `mod_path` (e.g `kedro.io.pandas`)
@@ -432,18 +430,16 @@ def _load_obj(class_path: str) -> object | None:
# `__all__` attribute -- either because it's a custom or a kedro.io dataset
except (ModuleNotFoundError, AttributeError, ValueError):
available_classes = None
-
try:
class_obj = load_obj(class_path)
- except (ModuleNotFoundError, ValueError):
- return None
- except AttributeError as exc:
+ except (ModuleNotFoundError, ValueError, AttributeError) as exc:
+ # If it's available, module exist but dependencies are missing
if available_classes and class_name in available_classes:
raise DatasetError(
- f"{exc} Please see the documentation on how to "
+ f"{exc}. Please see the documentation on how to "
f"install relevant dependencies for {class_path}:\n"
- f"https://kedro.readthedocs.io/en/stable/"
- f"kedro_project_setup/dependencies.html"
+ f"https://docs.kedro.org/en/stable/kedro_project_setup/"
+ f"dependencies.html#install-dependencies-related-to-the-data-catalog"
) from exc
return None
diff --git a/kedro/utils.py b/kedro/utils.py
index 6067d96b..f527b909 100644
--- a/kedro/utils.py
+++ b/kedro/utils.py
@@ -23,6 +23,4 @@ def load_obj(obj_path: str, default_obj_path: str = "") -> Any:
obj_path = obj_path_list.pop(0) if len(obj_path_list) > 1 else default_obj_path
obj_name = obj_path_list[0]
module_obj = importlib.import_module(obj_path)
- if not hasattr(module_obj, obj_name):
- raise AttributeError(f"Object '{obj_name}' cannot be loaded from '{obj_path}'.")
return getattr(module_obj, obj_name)
|
kedro-org/kedro
|
6f4119f8912a4ba7b9f14980ff18a89a0dda9abd
|
diff --git a/tests/io/test_core.py b/tests/io/test_core.py
index dcf2f30a..1cbea798 100644
--- a/tests/io/test_core.py
+++ b/tests/io/test_core.py
@@ -19,6 +19,7 @@ from kedro.io.core import (
generate_timestamp,
get_filepath_str,
get_protocol_and_path,
+ parse_dataset_definition,
validate_on_forbidden_chars,
)
@@ -265,6 +266,32 @@ class TestCoreFunctions:
with pytest.raises(DatasetError, match=expected_error_message):
validate_on_forbidden_chars(**input)
+ def test_dataset_name_typo(self, mocker):
+ # If the module doesn't exist, it return None instead ModuleNotFoundError
+ mocker.patch("kedro.io.core.load_obj", return_value=None)
+ dataset_name = "lAmbDaDaTAsET"
+
+ with pytest.raises(
+ DatasetError, match=f"Class '{dataset_name}' not found, is this a typo?"
+ ):
+ parse_dataset_definition({"type": dataset_name})
+
+ def test_dataset_missing_dependencies(self, mocker):
+ # If the module is found but import the dataset trigger ModuleNotFoundError
+ dataset_name = "LambdaDataset"
+
+ def side_effect_function(value):
+ if "__all__" in value:
+ return [dataset_name]
+ else:
+ raise ModuleNotFoundError
+
+ mocker.patch("kedro.io.core.load_obj", side_effect=side_effect_function)
+
+ pattern = "Please see the documentation on how to install relevant dependencies"
+ with pytest.raises(DatasetError, match=pattern):
+ parse_dataset_definition({"type": dataset_name})
+
class TestAbstractVersionedDataset:
def test_version_str_repr(self, load_version, save_version):
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 1ca93067..34704513 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -18,12 +18,6 @@ class TestExtractObject:
extracted_obj = load_obj("DummyClass", "tests.test_utils")
assert extracted_obj is DummyClass
- def test_load_obj_invalid_attribute(self):
- with pytest.raises(
- AttributeError, match=r"Object 'InvalidClass' cannot be loaded"
- ):
- load_obj("InvalidClass", "tests.test_utils")
-
def test_load_obj_invalid_module(self):
with pytest.raises(ImportError, match=r"No module named 'missing_path'"):
load_obj("InvalidClass", "missing_path")
|
Make import failures in `kedro-datasets` clearer
## Description
Disambiguate the "module does not exist" error from the `ImportError` in messages like:
```
DatasetError: An exception occurred when parsing config for dataset 'companies':
Class 'polars.CSVDataSet' not found or one of its dependencies has not been installed.
```
This task will require investigation into how the error messages are swallowed, which will inform the proper implementation. Probably change: https://github.com/kedro-org/kedro/blob/4194fbd16a992af0320a395c0060aaaea356efb2/kedro/io/core.py#L433
## Context
This week I've been battling some puzzling documentation errors and after a while I noticed that, if the dependencies of a particular dataset are not present, the `ImportError` is swallowed silently. Examples:
https://github.com/kedro-org/kedro-plugins/blob/b8881d113f8082ff03e0233db3ae4557a4c32547/kedro-datasets/kedro_datasets/biosequence/__init__.py#L7-L8
https://github.com/kedro-org/kedro-plugins/blob/b8881d113f8082ff03e0233db3ae4557a4c32547/kedro-datasets/kedro_datasets/networkx/__init__.py#L8-L15
This was done in https://github.com/quantumblacklabs/private-kedro/pull/575 to solve https://github.com/quantumblacklabs/private-kedro/issues/563, at the same time as dependencies were moved to `extras_require`.
I see how _not_ suppressing these errors could be extremely annoying back then, because `kedro.io` used to re-export all the datasets in its `__init__.py`:
https://github.com/quantumblacklabs/private-kedro/blob/f7dd2478aec4de1b46afbaded9bce3c69bff6304/kedro/io/__init__.py#L29-L47
```python
# kedro/io/__init__.py
"""``kedro.io`` provides functionality to read and write to a
number of data sets. At core of the library is ``AbstractDataSet``
which allows implementation of various ``AbstractDataSet``s.
"""
from .cached_dataset import CachedDataSet # NOQA
from .core import AbstractDataSet # NOQA
from .core import AbstractVersionedDataSet # NOQA
from .core import DataSetAlreadyExistsError # NOQA
from .core import DataSetError # NOQA
from .core import DataSetNotFoundError # NOQA
from .core import Version # NOQA
from .data_catalog import DataCatalog # NOQA
from .data_catalog_with_default import DataCatalogWithDefault # NOQA
from .lambda_data_set import LambdaDataSet # NOQA
from .memory_data_set import MemoryDataSet # NOQA
from .partitioned_data_set import IncrementalDataSet # NOQA
from .partitioned_data_set import PartitionedDataSet # NOQA
from .transformers import AbstractTransformer # NOQA
```
However, now our `__init__.py` is empty and datasets are meant to be imported separately:
https://github.com/kedro-org/kedro-plugins/blob/b8881d113f8082ff03e0233db3ae4557a4c32547/kedro-datasets/kedro_datasets/__init__.py#L1-L3
So I think it would be much better if we did _not_ silence those import errors.
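For illustration only, a standalone sketch of that idea (not the actual kedro implementation): if a module lists the class in `__all__` but importing it fails, the problem is a missing dependency rather than a typo.
```python
import importlib


def load_dataset_class(class_path: str):
    mod_path, _, class_name = class_path.rpartition(".")
    module = importlib.import_module(mod_path)
    try:
        return getattr(module, class_name)
    except AttributeError as exc:
        if class_name in getattr(module, "__all__", ()):
            # The package advertises the class, so a dependency must be missing.
            raise ImportError(
                f"'{class_name}' exists in '{mod_path}' but its optional "
                f"dependencies are not installed"
            ) from exc
        raise ImportError(f"Class '{class_path}' not found, is this a typo?") from exc
```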
## More context
If one dependency is missing, the user gets an unhelpful "module X has no attribute Y" error when trying to import a dataset, rather than the actual cause:
```
> pip uninstall biopython (kedro-dev)
Found existing installation: biopython 1.81
Uninstalling biopython-1.81:
Would remove:
/Users/juan_cano/.micromamba/envs/kedro-dev/lib/python3.10/site-packages/Bio/*
/Users/juan_cano/.micromamba/envs/kedro-dev/lib/python3.10/site-packages/BioSQL/*
/Users/juan_cano/.micromamba/envs/kedro-dev/lib/python3.10/site-packages/biopython-1.81.dist-info/*
Proceed (Y/n)? y
Successfully uninstalled biopython-1.81
> python (kedro-dev)
Python 3.10.9 | packaged by conda-forge | (main, Feb 2 2023, 20:26:08) [Clang 14.0.6 ] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> from kedro_datasets.biosequence import BioSequenceDataSet
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ImportError: cannot import name 'BioSequenceDataSet' from 'kedro_datasets.biosequence' (/Users/juan_cano/.micromamba/envs/kedro-dev/lib/python3.10/site-packages/kedro_datasets/biosequence/__init__.py)
```
|
0.0
|
6f4119f8912a4ba7b9f14980ff18a89a0dda9abd
|
[
"tests/io/test_core.py::TestCoreFunctions::test_dataset_name_typo",
"tests/io/test_core.py::TestCoreFunctions::test_dataset_missing_dependencies"
] |
[
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[1]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[True]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[False]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[0]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[0.0]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[0j]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var6]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var7]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var9]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var10]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var11]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var12]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation[var13]",
"tests/io/test_core.py::TestCoreFunctions::test_str_representation_none",
"tests/io/test_core.py::TestCoreFunctions::test_get_filepath_str",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[s3://bucket/file.txt-expected_result0]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[s3://user@BUCKET/file.txt-expected_result1]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[gcs://bucket/file.txt-expected_result2]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[gs://bucket/file.txt-expected_result3]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[adl://bucket/file.txt-expected_result4]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[abfs://bucket/file.txt-expected_result5]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[abfss://bucket/file.txt-expected_result6]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[abfss://[email protected]/mypath-expected_result7]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[hdfs://namenode:8020/file.txt-expected_result8]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[file:///tmp/file.txt-expected_result9]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[/tmp/file.txt-expected_result10]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[C:\\\\Projects\\\\file.txt-expected_result11]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[file:///C:\\\\Projects\\\\file.txt-expected_result12]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[https://example.com/file.txt-expected_result13]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path[http://example.com/file.txt-expected_result14]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path_http_with_version[http://example.com/file.txt]",
"tests/io/test_core.py::TestCoreFunctions::test_get_protocol_and_path_http_with_version[https://example.com/file.txt]",
"tests/io/test_core.py::TestCoreFunctions::test_validate_forbidden_chars[input0]",
"tests/io/test_core.py::TestCoreFunctions::test_validate_forbidden_chars[input1]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_version_str_repr[None-None]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_save_and_load[None-None]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_resolve_save_version",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_no_versions[None-None]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_local_exists",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_exists_general_exception",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_exists[None-None]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_prevent_overwrite[None-None]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_save_version_warning[2019-01-02T00.00.00.000Z-2019-01-01T23.59.59.999Z]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_versioning_existing_dataset[None-None-None-None]",
"tests/io/test_core.py::TestAbstractVersionedDataset::test_cache_release[None-None]",
"tests/test_utils.py::TestExtractObject::test_load_obj",
"tests/test_utils.py::TestExtractObject::test_load_obj_default_path",
"tests/test_utils.py::TestExtractObject::test_load_obj_invalid_module"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-06 05:14:09+00:00
|
apache-2.0
| 3,411
|
|
piotrmaslanka__satella-11
|
diff --git a/satella/coding/recast_exceptions.py b/satella/coding/recast_exceptions.py
index bf164db4..32e4e1c8 100644
--- a/satella/coding/recast_exceptions.py
+++ b/satella/coding/recast_exceptions.py
@@ -45,7 +45,8 @@ class rethrow_as(object):
"""
# You can also provide just two exceptions
- if len(pairs) == 2 and all(issubclass(p, BaseException) for p in pairs):
+ if len(pairs) == 2 and not isinstance(pairs[1], (tuple, list)) \
+ and all(issubclass(p, BaseException) for p in pairs):
self.mapping = {pairs[0]: pairs[1]}
else:
self.mapping = dict(pairs)
|
piotrmaslanka/satella
|
5fa0a67e4d35431f2f54740ba8fcfbd7f6d8bc59
|
diff --git a/tests/test_coding/test_rethrow.py b/tests/test_coding/test_rethrow.py
index ce17f722..80dd8ec6 100644
--- a/tests/test_coding/test_rethrow.py
+++ b/tests/test_coding/test_rethrow.py
@@ -39,4 +39,17 @@ class TestStuff(unittest.TestCase):
def lol():
raise ValueError()
- self.assertRaises(NameError, lol)
\ No newline at end of file
+ self.assertRaises(NameError, lol)
+
+ def test_issue_10(self):
+
+ class WTFException1(Exception): pass
+ class WTFException2(Exception): pass
+
+ @rethrow_as((NameError, WTFException1),
+ (TypeError, WTFException2))
+ def provide(exc):
+ raise exc()
+
+ self.assertRaises(WTFException1, lambda: provide(NameError))
+ self.assertRaises(WTFException2, lambda: provide(TypeError))
|
Bad rethrow_as
```python
@rethrow_as((UnicodeDecodeError, ConfigurationMalformed),
(json.JSONDecodeError, ConfigurationMalformed))
@rethrow_as(ValueError, ConfigurationMalformed)
@rethrow_as(binascii.Error, ConfigurationMalformed)
@rethrow_as(TypeError, ConfigurationError)
def provide(self):
return json.loads(self.root, encoding=self.encoding)
```
breaks it by treating the first two pairs in the wrong way
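A rough standalone sketch of the ambiguity (hypothetical `build_mapping` helper, not the library code): both call styles pass exactly two positional arguments, so the constructor has to check whether the second argument is itself a (source, target) pair.
```python
def build_mapping(*pairs):
    # two bare exception classes -> a single source/target mapping
    if len(pairs) == 2 and not isinstance(pairs[1], (tuple, list)) \
            and all(issubclass(p, BaseException) for p in pairs):
        return {pairs[0]: pairs[1]}
    # otherwise every argument is a (source, target) pair
    return dict(pairs)


assert build_mapping(ValueError, KeyError) == {ValueError: KeyError}
assert build_mapping((ValueError, KeyError), (TypeError, OSError)) == \
    {ValueError: KeyError, TypeError: OSError}
```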
|
0.0
|
5fa0a67e4d35431f2f54740ba8fcfbd7f6d8bc59
|
[
"tests/test_coding/test_rethrow.py::TestStuff::test_issue_10"
] |
[
"tests/test_coding/test_rethrow.py::TestStuff::test_rethrow",
"tests/test_coding/test_rethrow.py::TestStuff::test_rethrow_2",
"tests/test_coding/test_rethrow.py::TestStuff::test_rethrow_3",
"tests/test_coding/test_rethrow.py::TestStuff::test_silencer",
"tests/test_coding/test_rethrow.py::TestStuff::test_silencer_2"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-02-16 16:59:11+00:00
|
bsd-3-clause
| 4,571
|
|
PlasmaControl__DESC-613
|
diff --git a/desc/random.py b/desc/random.py
new file mode 100644
index 000000000..e40213d43
--- /dev/null
+++ b/desc/random.py
@@ -0,0 +1,198 @@
+"""Utilities for creating random surfaces and profiles."""
+
+import numpy as np
+import scipy.optimize
+import scipy.stats
+from numpy.random import default_rng
+
+from desc.backend import jnp, sign
+from desc.basis import DoubleFourierSeries
+from desc.derivatives import Derivative
+from desc.geometry import FourierRZToroidalSurface
+from desc.profiles import PowerSeriesProfile
+from desc.utils import setdefault
+
+
+def random_surface(
+ M=8,
+ N=8,
+ R0=(1, 10),
+ R_scale=(0.5, 2),
+ Z_scale=(0.5, 2),
+ NFP=(1, 10),
+ sym=None,
+ alpha=(1, 4),
+ beta=(1, 4),
+ rng=None,
+):
+ """Create a "random" toroidal surface.
+
+ Uses a double Fourier series representation with random coefficients.
+ The coefficients are given by
+
+ X_mn = X_scale * X_norm * N(1, exp(-beta))
+
+ Where N(m,s) is a normal random variable on with mean m and stdev s, and
+ X_norm = exp(-alpha*(|m| + |n|)) / exp(-alpha)
+
+
+ Parameters
+ ----------
+ M, N : int
+ Poloidal and toroidal resolution of the double Fourier series.
+ R0 : float or tuple
+ Major radius. If a tuple, treats as min/max for random value.
+ R_scale, Z_scale : float or tuple
+ Scale factors for R and Z coordinates. If a tuple, treats as min/max for random
+ values. The aspect ratio of the surface will be approximately
+ R0/sqrt(R_scale*Z_scale)
+ NFP : int or tuple
+ Number of field periods. If a tuple, treats as min/max for random int
+ sym : bool or None
+ Whether the surface should be stellarator symmetric. If None, selects randomly.
+ alpha : int or tuple
+ Spectral decay factor. Larger values of alpha will tend to create simpler
+ surfaces. If a tuple, treats as min/max for random int.
+ beta : int or tuple
+ Relative standard deviation for spectral coefficients. Larger values of beta
+ will tend to create simpler surfaces. If a tuple, treats as min/max for
+ random int.
+ rng : numpy.random.Generator
+ Random number generator. If None, uses numpy's default_rng
+
+ Returns
+ -------
+ surf : FourierRZToroidalSurface
+ Random toroidal surface.
+ """
+ rng = setdefault(rng, default_rng())
+ sym = setdefault(sym, rng.choice([True, False]))
+ if isinstance(alpha, tuple):
+ alpha = rng.integers(alpha[0], alpha[1] + 1)
+ if isinstance(beta, tuple):
+ beta = rng.integers(beta[0], beta[1] + 1)
+ if isinstance(NFP, tuple):
+ NFP = rng.integers(NFP[0], NFP[1] + 1)
+ if isinstance(R_scale, tuple):
+ R_scale = (R_scale[1] - R_scale[0]) * rng.random() + R_scale[0]
+ if isinstance(Z_scale, tuple):
+ Z_scale = (Z_scale[1] - Z_scale[0]) * rng.random() + Z_scale[0]
+ if isinstance(R0, tuple):
+ R0 = (R0[1] - R0[0]) * rng.random() + R0[0]
+
+ R_basis = DoubleFourierSeries(M=M, N=N, NFP=NFP, sym="cos" if sym else False)
+ Z_basis = DoubleFourierSeries(M=M, N=N, NFP=NFP, sym="sin" if sym else False)
+ # alpha determines how quickly amplitude decays for high M, N,
+ # normalized so that X_norm=1 for m=1
+ R_norm = np.exp(-alpha * np.sum(abs(R_basis.modes), axis=-1)) / np.exp(-alpha)
+ Z_norm = np.exp(-alpha * np.sum(abs(Z_basis.modes), axis=-1)) / np.exp(-alpha)
+
+ R_mn = R_norm * scipy.stats.truncnorm.rvs(
+ loc=1, scale=np.exp(-beta), size=R_basis.num_modes, a=-2, b=2, random_state=rng
+ )
+ Z_mn = Z_norm * scipy.stats.truncnorm.rvs(
+ loc=1, scale=np.exp(-beta), size=Z_basis.num_modes, a=-2, b=2, random_state=rng
+ )
+
+ # scale to approximate aspect ratio
+ R_scale1 = np.mean(
+ abs(R_mn)[(abs(R_basis.modes[:, 1]) == 1) & (abs(R_basis.modes[:, 2]) == 0)]
+ )
+ Z_scale1 = np.mean(
+ abs(Z_mn)[(abs(Z_basis.modes[:, 1]) == 1) & (abs(Z_basis.modes[:, 2]) == 0)]
+ )
+ R_mn *= R_scale / R_scale1
+ Z_mn *= Z_scale / Z_scale1
+ R_mn[R_basis.get_idx(0, 0, 0)] = R0
+ if not sym:
+ Z_mn[Z_basis.get_idx(0, 0, 0)] = 0 # center at Z=0
+ # flip sign and reduce magnitude of non-symmetric modes to avoid degenerate
+ # cases with no volume. kind of ad-hoc but seems to produce reasonable results
+ R_mn[sign(R_basis.modes[:, 1]) != sign(R_basis.modes[:, 2])] *= -np.exp(-beta)
+ Z_mn[sign(Z_basis.modes[:, 1]) == sign(Z_basis.modes[:, 2])] *= -np.exp(-beta)
+
+ surf = FourierRZToroidalSurface(
+ R_mn,
+ Z_mn,
+ R_basis.modes[:, 1:],
+ Z_basis.modes[:, 1:],
+ NFP=NFP,
+ sym=sym,
+ check_orientation=False,
+ )
+ # we do this manually just to avoid the warning when creating with left handed
+ # coordinates
+ if surf._compute_orientation() == -1:
+ surf._flip_orientation()
+ assert surf._compute_orientation() == 1
+ return surf
+
+
+def random_pressure(L=8, p0=(1e3, 1e4), rng=None):
+ """Create a random monotonic pressure profile.
+
+ Profile will be a PowerSeriesProfile with even symmetry,
+ enforced to be monotonically decreasing from p0 at r=0 to 0 at r=1
+
+ Could also be used for other monotonically decreasing profiles
+ such as temperature or density.
+
+ Parameters
+ ----------
+ L : int
+ Order of polynomial.
+ p0 : float or tuple
+ Pressure on axis. If a tuple, treats as min/max for random value.
+ rng : numpy.random.Generator
+ Random number generator. If None, uses numpy's default_rng
+
+ Returns
+ -------
+ pressure : PowerSeriesProfile
+ Random pressure profile.
+ """
+ assert (L // 2) == (L / 2), "L should be even"
+ rng = setdefault(rng, default_rng())
+ if isinstance(p0, tuple):
+ p0 = rng.uniform(p0[0], p0[1])
+
+ # first create random even coeffs
+ p = 1 - 2 * rng.random(L // 2 + 1)
+ # make it sum to 0 -> p=0 at r=1
+ p[0] -= p.sum()
+ # make p(0) = 1
+ p = p / p[0]
+ # this inserts zeros for all the odd modes
+ p1 = jnp.vstack([p, jnp.zeros_like(p)]).flatten(order="F")[::-1]
+ r = jnp.linspace(0, 1, 40)
+ y = jnp.polyval(p1, r)
+
+ def fun(x):
+ x = jnp.vstack([x, jnp.zeros_like(x)]).flatten(order="F")[::-1]
+ y_ = jnp.polyval(x, r)
+ return jnp.sum((y - y_) ** 2)
+
+ # constrain it so that it is monotonically decreasing, goes through (0,1) and (1,0)
+ def con(x):
+ x = jnp.vstack([x, jnp.zeros_like(x)]).flatten(order="F")[::-1]
+ dx = jnp.polyder(x, 1)
+ dy = jnp.polyval(dx, r)
+ return jnp.concatenate([dy, jnp.atleast_1d(jnp.sum(x)), jnp.atleast_1d(x[-1])])
+
+ hess = Derivative(fun, mode="hess")
+ grad = Derivative(fun, mode="grad")
+ A = Derivative(con, mode="fwd")(0 * p)
+ l = np.concatenate([-np.inf * np.ones_like(r), jnp.array([0, 1])])
+ u = np.concatenate([np.zeros_like(r), jnp.array([0, 1])])
+
+ out = scipy.optimize.minimize(
+ fun,
+ p,
+ jac=grad,
+ hess=hess,
+ constraints=scipy.optimize.LinearConstraint(A, l, u),
+ method="trust-constr",
+ )
+
+ p = np.vstack([out.x, np.zeros_like(out.x)]).flatten(order="F")
+ return PowerSeriesProfile(p[::2] * p0, modes=np.arange(L + 1)[::2], sym=True)
|
PlasmaControl/DESC
|
61797b69e6991b0284e76e0e493d9240edc779e0
|
diff --git a/tests/test_random.py b/tests/test_random.py
new file mode 100644
index 000000000..c2390a275
--- /dev/null
+++ b/tests/test_random.py
@@ -0,0 +1,81 @@
+"""Tests for random surfaces, profiles etc."""
+import numpy as np
+import pytest
+
+from desc.equilibrium import Equilibrium
+from desc.grid import LinearGrid
+from desc.random import random_pressure, random_surface
+
+
[email protected]
+def test_random_pressure():
+ """Test that randomly generated profile is monotonic, has correct scaling etc."""
+ rng = np.random.default_rng(0)
+ p = random_pressure(L=8, p0=(1e3, 1e4), rng=rng)
+ assert p.basis.sym == "even"
+ assert 1e3 <= p(np.array([0.0])) <= 1e4
+ assert p.basis.L == 8 # symmetric, so should be 4 params up to order 8
+ dp = p(np.linspace(0, 1, 10), dr=1)
+ assert np.all(dp <= 0) # can't use array_less because that doesn't do <=
+
+
[email protected]
+def test_random_surface():
+ """Test that randomly generated surface is "sensible"."""
+ rng = np.random.default_rng(0)
+ surf = random_surface(
+ M=4,
+ N=4,
+ R0=(5, 10),
+ R_scale=(0.5, 2),
+ Z_scale=(0.5, 2),
+ NFP=(1, 3),
+ sym=True,
+ alpha=(1, 4),
+ beta=(1, 4),
+ rng=rng,
+ )
+ assert surf.sym
+ assert 1 <= surf.NFP <= 3
+ assert surf.M == 4
+ assert surf.N == 4
+ assert surf._compute_orientation() == 1
+
+ eq = Equilibrium(surface=surf)
+ R0 = eq.compute("R0")["R0"]
+ assert 5 <= R0 <= 10
+ AR = eq.compute("R0/a")["R0/a"]
+ # should be ~ R0/sqrt(R_scale*Z_scale), allowing for random variation
+ assert 2.5 <= AR <= 20
+ assert eq.is_nested()
+
+ # same stuff for non-symmetric
+ rng = np.random.default_rng(0)
+ surf = random_surface(
+ M=4,
+ N=4,
+ R0=(5, 10),
+ R_scale=(0.5, 2),
+ Z_scale=(0.5, 2),
+ NFP=(1, 3),
+ sym=False,
+ alpha=(1, 4),
+ beta=(1, 4),
+ rng=rng,
+ )
+ assert not surf.sym
+ assert 1 <= surf.NFP <= 3
+ assert surf.M == 4
+ assert surf.N == 4
+ assert surf._compute_orientation() == 1
+
+ eq = Equilibrium(surface=surf)
+ R0 = eq.compute("R0")["R0"]
+ assert 5 <= R0 <= 10
+ Z0 = eq.compute("Z", grid=LinearGrid(rho=np.array([0]), M=0, N=8, NFP=eq.NFP))["Z"]
+ # should be centered around Z=0
+ np.testing.assert_allclose(np.mean(Z0), 0, atol=1e-14)
+ AR = eq.compute("R0/a")["R0/a"]
+ # should be ~ R0/sqrt(R_scale*Z_scale), allowing for random variation
+ assert 2.5 <= AR <= 20
+ assert eq.is_nested()
|
Add utilities to create "random" equilibria
For database studies, testing, examples etc, it would be nice to be able to generate some "random" equilibria that satisfy certain basic criteria, such as major/minor radius, field strength, beta, etc.
- [ ] Random surface with approximate shape that ideally doesn't self intersect
- [ ] Random monotonic profiles
- [ ] Random "smooth" non-monotonic profiles
|
0.0
|
61797b69e6991b0284e76e0e493d9240edc779e0
|
[
"tests/test_random.py::test_random_pressure",
"tests/test_random.py::test_random_surface"
] |
[] |
{
"failed_lite_validators": [
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-07 18:21:19+00:00
|
mit
| 460
|
|
ministryofjustice__mt940-writer-12
|
diff --git a/mt940_writer.py b/mt940_writer.py
index b6a3ad9..f3577ac 100644
--- a/mt940_writer.py
+++ b/mt940_writer.py
@@ -58,11 +58,12 @@ class Balance:
class Transaction:
- def __init__(self, date, amount, transaction_type, narrative):
+ def __init__(self, date, amount, transaction_type, narrative, additional_info=None):
self.date = date
self.amount = amount
self.transaction_type = transaction_type
self.narrative = narrative
+ self.additional_info = additional_info
def __str__(self):
return '{value_date}{entry_date}{category}{amount}{type_code}{narrative}'.format(
@@ -75,6 +76,18 @@ class Transaction:
)
+class TransactionAdditionalInfo:
+ def __init__(self, information):
+ self.information = information
+
+ def __str__(self):
+ return '{information}'.format(
+ information=self.information,
+ )
+
+ def __bool__(self):
+ return bool(self.information)
+
class Statement:
def __init__(self, reference_number, account, statement_number, opening_balance, closing_balance, transactions):
self.reference_number = reference_number
@@ -91,6 +104,8 @@ class Statement:
yield ':60F:%s' % self.opening_balance
for transaction in self.transactions:
yield ':61:%s' % transaction
+ if transaction.additional_info:
+ yield ':86:%s' % transaction.additional_info
yield ':62F:%s' % self.closing_balance
def __str__(self):
|
ministryofjustice/mt940-writer
|
899a25186b9fe12e60f169789de72ade5aa5fbf5
|
diff --git a/tests/test_mt940_writer.py b/tests/test_mt940_writer.py
index cf4e232..aa7afd7 100644
--- a/tests/test_mt940_writer.py
+++ b/tests/test_mt940_writer.py
@@ -5,19 +5,6 @@ from unittest import TestCase
import mt940_writer as mt940
-EXPECTED_OUTPUT = (
- ':20:59716\n'
- ':25:80008000 102030\n'
- ':28C:1/1\n'
- ':60F:C160922GBP12,99\n'
- ':61:1609220922C1,00NTRFPayment 1\n'
- ':61:1609220922C1,00NMSCPayment 2\n'
- ':61:1609220922C1,00NTRFPayment 3\n'
- ':61:1609220922D0,99NMSCPayment 4\n'
- ':62F:C160922GBP15,00'
-)
-
-
class MT940WriterTestCase(TestCase):
def test_write_statement(self):
@@ -32,6 +19,18 @@ class MT940WriterTestCase(TestCase):
mt940.Transaction(stmt_date, Decimal('-0.99'), mt940.TransactionType.miscellaneous, 'Payment 4')
]
+ expected_output = (
+ ':20:59716\n'
+ ':25:80008000 102030\n'
+ ':28C:1/1\n'
+ ':60F:C160922GBP12,99\n'
+ ':61:1609220922C1,00NTRFPayment 1\n'
+ ':61:1609220922C1,00NMSCPayment 2\n'
+ ':61:1609220922C1,00NTRFPayment 3\n'
+ ':61:1609220922D0,99NMSCPayment 4\n'
+ ':62F:C160922GBP15,00'
+ )
+
statement = mt940.Statement(
'59716',
account,
@@ -41,4 +40,42 @@ class MT940WriterTestCase(TestCase):
transactions
)
- self.assertEqual(str(statement), EXPECTED_OUTPUT)
+ self.assertEqual(str(statement), expected_output)
+ def test_write_statement_with_additional_transaction_info(self):
+ stmt_date = date(2016, 9, 22)
+ account = mt940.Account('80008000', '102030')
+ opening_balance = mt940.Balance(Decimal('12.99'), stmt_date, 'GBP')
+ closing_balance = mt940.Balance(Decimal('15'), stmt_date, 'GBP')
+ transactions = [
+ mt940.Transaction(stmt_date, Decimal('1'), mt940.TransactionType.transfer, 'Payment 1',
+ mt940.TransactionAdditionalInfo('ADDITIONAL/DATA/1')),
+ mt940.Transaction(stmt_date, Decimal('1'), mt940.TransactionType.miscellaneous, 'Payment 2',
+ mt940.TransactionAdditionalInfo('ADDITIONAL/DATA/2')),
+ mt940.Transaction(stmt_date, Decimal('1'), mt940.TransactionType.transfer, 'Payment 3',
+ mt940.TransactionAdditionalInfo('')),
+ mt940.Transaction(stmt_date, Decimal('-0.99'), mt940.TransactionType.miscellaneous, 'Payment 4')
+ ]
+
+ expected_output = (
+ ':20:59716\n'
+ ':25:80008000 102030\n'
+ ':28C:1/1\n'
+ ':60F:C160922GBP12,99\n'
+ ':61:1609220922C1,00NTRFPayment 1\n'
+ ':86:ADDITIONAL/DATA/1\n'
+ ':61:1609220922C1,00NMSCPayment 2\n'
+ ':86:ADDITIONAL/DATA/2\n'
+ ':61:1609220922C1,00NTRFPayment 3\n'
+ ':61:1609220922D0,99NMSCPayment 4\n'
+ ':62F:C160922GBP15,00'
+ )
+ statement = mt940.Statement(
+ '59716',
+ account,
+ '1/1',
+ opening_balance,
+ closing_balance,
+ transactions
+ )
+
+ self.assertEqual(str(statement), expected_output)
|
add support for Tag 86
<img width="651" alt="image" src="https://github.com/ministryofjustice/mt940-writer/assets/20453622/fb184e68-7eaf-4b13-89c4-f6075d2f28b8">
<img width="474" alt="image" src="https://github.com/ministryofjustice/mt940-writer/assets/20453622/6e08494e-b5f5-41f5-8c97-50de4b552165">
|
0.0
|
899a25186b9fe12e60f169789de72ade5aa5fbf5
|
[
"tests/test_mt940_writer.py::MT940WriterTestCase::test_write_statement_with_additional_transaction_info"
] |
[
"tests/test_mt940_writer.py::MT940WriterTestCase::test_write_statement"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-11-15 14:20:55+00:00
|
mit
| 3,943
|
|
dfm__tess-atlas-108
|
diff --git a/setup.py b/setup.py
index 0a7c68b..dabe2f7 100644
--- a/setup.py
+++ b/setup.py
@@ -31,15 +31,17 @@ INSTALL_REQUIRES = [
"lightkurve>=2.0.11",
"plotly>=4.9.0",
"arviz>=0.10.0",
- "corner",
+ "corner>=2.2.1",
"pandas",
"jupyter",
"ipykernel",
- "jupytext",
+ "jupytext<1.11,>=1.8", # pinned for jypyter-book
"kaleido",
"aesara-theano-fallback",
"theano-pymc>=1.1.2",
"jupyter-book",
+ "seaborn",
+ "jupyter_client==6.1.12", # pinned beacuse of nbconvert bug https://github.com/jupyter/nbconvert/pull/1549#issuecomment-818734169
]
EXTRA_REQUIRE = {"test": ["pytest>=3.6", "testbook>=0.2.3"]}
EXTRA_REQUIRE["dev"] = EXTRA_REQUIRE["test"] + [
@@ -101,6 +103,7 @@ if __name__ == "__main__":
"run_tois=tess_atlas.notebook_preprocessors.run_tois:main",
"runs_stats_plotter=tess_atlas.analysis.stats_plotter:main",
"make_webpages=tess_atlas.webbuilder.build_pages:main",
+ "make_slurm_job=tess_atlas.batch_job_generator.slurm_job_generator:main",
]
},
)
diff --git a/src/tess_atlas/batch_job_generator/__init__.py b/src/tess_atlas/batch_job_generator/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/tess_atlas/batch_job_generator/slurm_job_generator.py b/src/tess_atlas/batch_job_generator/slurm_job_generator.py
new file mode 100644
index 0000000..a1ed5a4
--- /dev/null
+++ b/src/tess_atlas/batch_job_generator/slurm_job_generator.py
@@ -0,0 +1,67 @@
+import argparse
+import os
+import shutil
+from typing import List
+
+import pandas as pd
+
+TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), "slurm_template.sh")
+
+
+def make_slurm_file(outdir: str, toi_numbers: List[int], module_loads: str):
+ with open(TEMPLATE_FILE, "r") as f:
+ file_contents = f.read()
+ outdir = os.path.abspath(outdir)
+ logfile_name = os.path.join(outdir, "toi_slurm_jobs.log")
+ jobfile_name = os.path.join(outdir, "slurm_job.sh")
+ path_to_python = shutil.which("python")
+ path_to_env_activate = path_to_python.replace("python", "activate")
+ file_contents = file_contents.replace(
+ "{{{TOTAL NUM}}}", str(len(toi_numbers) - 1)
+ )
+ file_contents = file_contents.replace("{{{MODULE LOADS}}}", module_loads)
+ file_contents = file_contents.replace("{{{OUTDIR}}}", outdir)
+ file_contents = file_contents.replace(
+ "{{{LOAD ENV}}}", f"source {path_to_env_activate}"
+ )
+ file_contents = file_contents.replace("{{{LOG FILE}}}", logfile_name)
+ toi_str = " ".join([str(toi) for toi in toi_numbers])
+ file_contents = file_contents.replace("{{{TOI NUMBERS}}}", toi_str)
+ with open(jobfile_name, "w") as f:
+ f.write(file_contents)
+ print(f"Jobfile created, to run job: \nsbatch {jobfile_name}")
+
+
+def get_toi_numbers(toi_csv: str):
+ df = pd.read_csv(toi_csv)
+ return list(df.toi_numbers.values)
+
+
+def get_cli_args():
+ parser = argparse.ArgumentParser(
+ description="Create slurm job for analysing TOIs"
+ )
+ parser.add_argument(
+ "--toi_csv",
+ help="CSV with the toi numbers to analyse (csv needs a column with `toi_numbers`)",
+ )
+ parser.add_argument(
+ "--outdir", help="outdir for jobs", default="notebooks"
+ )
+ parser.add_argument(
+ "--module_loads",
+ help="String containing all module loads in one line (each module separated by a space)",
+ )
+ args = parser.parse_args()
+ return args.toi_csv, args.outdir, args.module_loads
+
+
+def main():
+ toi_csv, outdir, module_loads = get_cli_args()
+ os.makedirs(outdir, exist_ok=True)
+ toi_numbers = get_toi_numbers(toi_csv)
+ make_slurm_file(outdir, toi_numbers, module_loads)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/src/tess_atlas/batch_job_generator/slurm_template.sh b/src/tess_atlas/batch_job_generator/slurm_template.sh
new file mode 100644
index 0000000..250c930
--- /dev/null
+++ b/src/tess_atlas/batch_job_generator/slurm_template.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+#SBATCH --job-name=run_tois
+#SBATCH --output={{{LOG FILE}}}
+#
+#SBATCH --ntasks=1
+#SBATCH --time=300:00
+#SBATCH --mem-per-cpu=500MB
+#
+#SBATCH --array=0-{{{TOTAL NUM}}}
+
+module load {{{MODULE LOADS}}}
+{{{LOAD ENV}}}
+
+
+TOI_NUMBERS=({{{TOI NUMBERS}}})
+
+srun run_toi ${TOI_NUMBERS[$SLURM_ARRAY_TASK_ID]} --outdir {{{OUTDIR}}}
diff --git a/src/tess_atlas/notebook_preprocessors/run_toi.py b/src/tess_atlas/notebook_preprocessors/run_toi.py
index 1f17ba7..a76c4f5 100644
--- a/src/tess_atlas/notebook_preprocessors/run_toi.py
+++ b/src/tess_atlas/notebook_preprocessors/run_toi.py
@@ -87,7 +87,7 @@ def execute_toi_notebook(notebook_filename):
def get_cli_args():
"""Get the TOI number from the CLI and return it"""
- parser = argparse.ArgumentParser(prog="run_toi_in_pool")
+ parser = argparse.ArgumentParser(prog="run_toi")
default_outdir = os.path.join(os.getcwd(), "notebooks")
parser.add_argument(
"toi_number", type=int, help="The TOI number to be analysed (e.g. 103)"
|
dfm/tess-atlas
|
6274de545082661de3677fb609fa7274f26afb47
|
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index d45d0c3..ca420da 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -26,7 +26,7 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- pip install --use-feature=2020-resolver -U -e ".[dev]"
+ pip install -U -e ".[dev]"
# Test the py:light -> ipynb -> py:light round trip conversion
- name: roundtrip conversion test
diff --git a/tests/test_slurm_job_generator.py b/tests/test_slurm_job_generator.py
new file mode 100644
index 0000000..59a582a
--- /dev/null
+++ b/tests/test_slurm_job_generator.py
@@ -0,0 +1,24 @@
+import os
+import unittest
+
+from tess_atlas.batch_job_generator.slurm_job_generator import make_slurm_file
+
+
+class JobgenTest(unittest.TestCase):
+ def setUp(self):
+ self.start_dir = os.getcwd()
+ self.outdir = f"test_jobgen"
+ os.makedirs(self.outdir, exist_ok=True)
+
+ def tearDown(self):
+ import shutil
+
+ if os.path.exists(self.outdir):
+ shutil.rmtree(self.outdir)
+
+ def test_slurmfile(self):
+ make_slurm_file(self.outdir, [100, 101, 102], "module load 1")
+
+
+if __name__ == "__main__":
+ unittest.main()
|
Cluster jobs failing to pre-process notebooks: TypeError: 'coroutine' object is not subscriptable
|
0.0
|
6274de545082661de3677fb609fa7274f26afb47
|
[
"tests/test_slurm_job_generator.py::JobgenTest::test_slurmfile"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-06 02:54:35+00:00
|
mit
| 1,902
|
|
tobi-wan-kenobi__bumblebee-status-714
|
diff --git a/bumblebee_status/modules/contrib/arch_update.py b/bumblebee_status/modules/contrib/arch_update.py
new file mode 120000
index 0000000..57fd99f
--- /dev/null
+++ b/bumblebee_status/modules/contrib/arch_update.py
@@ -0,0 +1,1 @@
+arch-update.py
\ No newline at end of file
diff --git a/bumblebee_status/modules/contrib/battery_upower.py b/bumblebee_status/modules/contrib/battery_upower.py
new file mode 120000
index 0000000..4a7bb68
--- /dev/null
+++ b/bumblebee_status/modules/contrib/battery_upower.py
@@ -0,0 +1,1 @@
+battery-upower.py
\ No newline at end of file
diff --git a/bumblebee_status/modules/contrib/layout_xkbswitch.py b/bumblebee_status/modules/contrib/layout_xkbswitch.py
new file mode 120000
index 0000000..e7d6b94
--- /dev/null
+++ b/bumblebee_status/modules/contrib/layout_xkbswitch.py
@@ -0,0 +1,1 @@
+layout-xkbswitch.py
\ No newline at end of file
diff --git a/bumblebee_status/modules/core/layout_xkb.py b/bumblebee_status/modules/core/layout_xkb.py
new file mode 120000
index 0000000..f2e8037
--- /dev/null
+++ b/bumblebee_status/modules/core/layout_xkb.py
@@ -0,0 +1,1 @@
+layout-xkb.py
\ No newline at end of file
diff --git a/docs/development/module.rst b/docs/development/module.rst
index 1d6e716..113a6f7 100644
--- a/docs/development/module.rst
+++ b/docs/development/module.rst
@@ -11,6 +11,7 @@ Adding a new module to ``bumblebee-status`` is straight-forward:
``bumblebee-status`` (i.e. a module called
``bumblebee_status/modules/contrib/test.py`` will be loaded using
``bumblebee-status -m test``)
+- The module name must follow the `Python Naming Conventions <https://www.python.org/dev/peps/pep-0008/#package-and-module-names>`_
- See below for how to actually write the module
- Test (run ``bumblebee-status`` in the CLI)
- Make sure your changes don’t break anything: ``./coverage.sh``
|
tobi-wan-kenobi/bumblebee-status
|
96f8e92822f8b72287ef97bbdd9a0c9bf1a063da
|
diff --git a/tests/modules/contrib/test_arch-update.py b/tests/modules/contrib/test_arch-update.py
index 6a1c172..b11187b 100644
--- a/tests/modules/contrib/test_arch-update.py
+++ b/tests/modules/contrib/test_arch-update.py
@@ -3,3 +3,5 @@ import pytest
def test_load_module():
__import__("modules.contrib.arch-update")
+def test_load_symbolic_link_module():
+ __import__("modules.contrib.arch_update")
diff --git a/tests/modules/contrib/test_battery-upower.py b/tests/modules/contrib/test_battery-upower.py
index cb62a16..d129679 100644
--- a/tests/modules/contrib/test_battery-upower.py
+++ b/tests/modules/contrib/test_battery-upower.py
@@ -5,3 +5,6 @@ pytest.importorskip("dbus")
def test_load_module():
__import__("modules.contrib.battery-upower")
+def test_load_symbolic_link_module():
+ __import__("modules.contrib.battery_upower")
+
diff --git a/tests/modules/contrib/test_layout-xkbswitch.py b/tests/modules/contrib/test_layout-xkbswitch.py
index 08cfd96..b709254 100644
--- a/tests/modules/contrib/test_layout-xkbswitch.py
+++ b/tests/modules/contrib/test_layout-xkbswitch.py
@@ -3,3 +3,5 @@ import pytest
def test_load_module():
__import__("modules.contrib.layout-xkbswitch")
+def test_load_symbolic_link_module():
+ __import__("modules.contrib.layout_xkbswitch")
diff --git a/tests/modules/core/test_layout-xkb.py b/tests/modules/core/test_layout-xkb.py
index 8eacfad..852b9da 100644
--- a/tests/modules/core/test_layout-xkb.py
+++ b/tests/modules/core/test_layout-xkb.py
@@ -5,3 +5,5 @@ pytest.importorskip("xkbgroup")
def test_load_module():
__import__("modules.core.layout-xkb")
+def test_load_symbolic_link_module():
+ __import__("modules.core.layout_xkb")
|
Modules names with hyphens (-)
Hey, ya.
Should we rename the modules below to match the [Python Naming Conventions](https://www.python.org/dev/peps/pep-0008/#package-and-module-names)?
```sh
modules/core/layout-xkb.py
modules/contrib/layout-xkbswitch.py
modules/contrib/arch-update.py
modules/contrib/battery-upower.py
```
These modules work as expected (I think) in the status bar, but I can't import them into a test file using `import`.
```sh
E import modules.contrib.arch-update
E ^
E SyntaxError: invalid syntax
```
Any ideas?
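For what it's worth, a small sketch of the workaround (assumes the repository layout used by the tests, where `modules.contrib` is importable): `importlib` can load a hyphenated file name given as a string, but the `import` statement cannot, so underscore-named symlinks give the modules importable aliases.
```python
import importlib

# String-based import works even though "arch-update" is not a valid identifier:
mod = importlib.import_module("modules.contrib.arch-update")

# import modules.contrib.arch-update   # SyntaxError: invalid syntax

# The symlink added in this change exposes the same module under a PEP 8 name:
alias = importlib.import_module("modules.contrib.arch_update")
```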
|
0.0
|
96f8e92822f8b72287ef97bbdd9a0c9bf1a063da
|
[
"tests/modules/contrib/test_arch-update.py::test_load_symbolic_link_module",
"tests/modules/contrib/test_layout-xkbswitch.py::test_load_symbolic_link_module"
] |
[
"tests/modules/contrib/test_arch-update.py::test_load_module",
"tests/modules/contrib/test_layout-xkbswitch.py::test_load_module"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_added_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-01 22:19:14+00:00
|
mit
| 5,932
|
|
Azure__iotedgedev-173
|
diff --git a/iotedgedev/azurecli.py b/iotedgedev/azurecli.py
index c5bce70..6bce331 100644
--- a/iotedgedev/azurecli.py
+++ b/iotedgedev/azurecli.py
@@ -226,10 +226,10 @@ class AzureCli:
return result
- def apply_configuration(self, deviceId, connection_string, config):
- self.output.status(f("Deploying '{config}' to '{deviceId}'..."))
+ def apply_configuration(self, device_id, connection_string, hub_name, config):
+ self.output.status(f("Deploying '{config}' to '{device_id}'..."))
- return self.invoke_az_cli_outproc(["iot", "hub", "apply-configuration", "-d", deviceId, "-k", config, "-l", connection_string], error_message=f("Failed to deploy '{config}' to '{deviceId}'..."), suppress_output=True)
+ return self.invoke_az_cli_outproc(["iot", "hub", "apply-configuration", "-d", device_id, "-n", hub_name, "-k", config, "-l", connection_string], error_message=f("Failed to deploy '{config}' to '{device_id}'..."), suppress_output=True)
def get_free_iothub(self):
with output_io_cls() as io:
diff --git a/iotedgedev/connectionstring.py b/iotedgedev/connectionstring.py
index cc29b68..2c8c19e 100644
--- a/iotedgedev/connectionstring.py
+++ b/iotedgedev/connectionstring.py
@@ -1,10 +1,10 @@
class ConnectionString:
def __init__(self, value):
- self.value = value
+ self.ConnectionString = value
self.data = dict()
- if self.value:
- parts = value.split(';')
+ if self.ConnectionString:
+ parts = self.ConnectionString.split(';')
if len(parts) > 0:
for part in parts:
subpart = part.split('=', 1)
@@ -13,6 +13,8 @@ class ConnectionString:
if self.data:
self.HostName = self["hostname"]
+ if self.HostName:
+ self.HubName = self.HostName.split('.')[0]
self.SharedAccessKey = self["sharedaccesskey"]
def __getitem__(self, key):
@@ -23,7 +25,7 @@ class IoTHubConnectionString(ConnectionString):
def __init__(self, value):
ConnectionString.__init__(self, value)
- if self.value:
+ if self.ConnectionString:
self.SharedAccessKeyName = self["sharedaccesskeyname"]
@@ -31,5 +33,5 @@ class DeviceConnectionString(ConnectionString):
def __init__(self, value):
ConnectionString.__init__(self, value)
- if self.value:
+ if self.ConnectionString:
self.DeviceId = self["deviceid"]
diff --git a/iotedgedev/edge.py b/iotedgedev/edge.py
index 6e71ba0..4d20943 100644
--- a/iotedgedev/edge.py
+++ b/iotedgedev/edge.py
@@ -10,11 +10,11 @@ class Edge:
self.output.header("DEPLOYING CONFIGURATION")
- self.envvars.verify_envvar_has_val("IOTHUB_CONNECTION_STRING", self.envvars.IOTHUB_CONNECTION_STRING)
- self.envvars.verify_envvar_has_val("DEVICE_CONNECTION_STRING", self.envvars.DEVICE_CONNECTION_STRING)
+ self.envvars.verify_envvar_has_val("IOTHUB_CONNECTION_INFO", self.envvars.IOTHUB_CONNECTION_INFO)
+ self.envvars.verify_envvar_has_val("DEVICE_CONNECTION_INFO", self.envvars.DEVICE_CONNECTION_INFO)
self.envvars.verify_envvar_has_val("DEPLOYMENT_CONFIG_FILE", self.envvars.DEPLOYMENT_CONFIG_FILE)
- self.azure_cli.apply_configuration(self.envvars.DEVICE_CONNECTION_INFO.DeviceId, self.envvars.IOTHUB_CONNECTION_STRING, self.envvars.DEPLOYMENT_CONFIG_FILE_PATH)
+ self.azure_cli.apply_configuration(self.envvars.DEVICE_CONNECTION_INFO.DeviceId, self.envvars.IOTHUB_CONNECTION_INFO.ConnectionString, self.envvars.IOTHUB_CONNECTION_INFO.HubName, self.envvars.DEPLOYMENT_CONFIG_FILE_PATH)
self.output.footer("DEPLOYMENT COMPLETE")
\ No newline at end of file
|
Azure/iotedgedev
|
ce59bad1286bf650d442b2b7fbe16a3db676a497
|
diff --git a/tests/test_connectionstring.py b/tests/test_connectionstring.py
new file mode 100644
index 0000000..21d0dc9
--- /dev/null
+++ b/tests/test_connectionstring.py
@@ -0,0 +1,78 @@
+import os
+import pytest
+from dotenv import load_dotenv
+from iotedgedev.connectionstring import ConnectionString, IoTHubConnectionString, DeviceConnectionString
+
+emptystring = ""
+valid_connectionstring = "HostName=testhub.azure-devices.net;SharedAccessKey=gibberish"
+valid_iothub_connectionstring = "HostName=testhub.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=moregibberish"
+valid_device_connectionstring = "HostName=testhub.azure-devices.net;DeviceId=testdevice;SharedAccessKey=othergibberish"
+invalid_connectionstring = "HostName=azure-devices.net;SharedAccessKey=gibberish"
+invalid_iothub_connectionstring = "HostName=testhub.azure-devices.net;SharedAccessKey=moregibberish"
+invalid_device_connectionstring = "HostName=testhub.azure-devices.net;DeviceId=;SharedAccessKey=othergibberish"
+
+def test_empty_connectionstring():
+ connectionstring = ConnectionString(emptystring)
+ assert not connectionstring.data
+
+def test_empty_iothub_connectionstring():
+ connectionstring = IoTHubConnectionString(emptystring)
+ assert not connectionstring.data
+
+def test_empty_device_connectionstring():
+ connectionstring = DeviceConnectionString(emptystring)
+ assert not connectionstring.data
+
+def test_valid_connectionstring():
+ connectionstring = ConnectionString(valid_connectionstring)
+ assert connectionstring.HostName == "testhub.azure-devices.net"
+ assert connectionstring.HubName == "testhub"
+ assert connectionstring.SharedAccessKey == "gibberish"
+
+def test_valid_iothub_connectionstring():
+ connectionstring = IoTHubConnectionString(valid_iothub_connectionstring)
+ assert connectionstring.HostName == "testhub.azure-devices.net"
+ assert connectionstring.HubName == "testhub"
+ assert connectionstring.SharedAccessKeyName == "iothubowner"
+ assert connectionstring.SharedAccessKey == "moregibberish"
+
+def test_valid_devicehub_connectionstring():
+ connectionstring = DeviceConnectionString(valid_device_connectionstring)
+ assert connectionstring.HostName == "testhub.azure-devices.net"
+ assert connectionstring.HubName == "testhub"
+ assert connectionstring.DeviceId == "testdevice"
+ assert connectionstring.SharedAccessKey == "othergibberish"
+
+def test_invalid_connectionstring():
+ connectionstring = ConnectionString(invalid_connectionstring)
+ assert connectionstring.HubName != "testhub"
+
+def test_invalid_iothub_connectionstring():
+ with pytest.raises(KeyError):
+ IoTHubConnectionString(invalid_iothub_connectionstring)
+
+def test_invalid_devicehub_connectionstring():
+ connectionstring = DeviceConnectionString(invalid_device_connectionstring)
+ assert connectionstring.HostName == "testhub.azure-devices.net"
+ assert connectionstring.HubName == "testhub"
+ assert not connectionstring.DeviceId
+ assert connectionstring.SharedAccessKey == "othergibberish"
+
+def test_valid_env_iothub_connectionstring():
+ load_dotenv(".env")
+ env_iothub_connectionstring = os.getenv("IOTHUB_CONNECTION_STRING")
+ connectionstring = IoTHubConnectionString(env_iothub_connectionstring)
+ assert connectionstring.HostName
+ assert connectionstring.HubName
+ assert connectionstring.SharedAccessKey
+ assert connectionstring.SharedAccessKeyName
+
+def test_valid_env_device_connectionstring():
+ load_dotenv(".env")
+ env_device_connectionstring = os.getenv("DEVICE_CONNECTION_STRING")
+ connectionstring = DeviceConnectionString(env_device_connectionstring)
+ assert connectionstring.HostName
+ assert connectionstring.HubName
+ assert connectionstring.SharedAccessKey
+ assert connectionstring.DeviceId
+
\ No newline at end of file
diff --git a/tests/test_iotedgedev.py b/tests/test_iotedgedev.py
index 2d08bad..c809c15 100644
--- a/tests/test_iotedgedev.py
+++ b/tests/test_iotedgedev.py
@@ -153,7 +153,7 @@ def test_monitor(request, capfd):
print (err)
print (result.output)
- assert 'application properties' in out
+ assert 'timeCreated' in out
@pytest.fixture
|
AZ IOT HUB apply-configuration needs hubname.
If a user has an old version of the az cli iot extension installed, they get this:
`az iot hub apply-configuration: error: argument --hub-name/-n is required`
- add the -n parameter to the apply-configuration call. You can get it from IOTHUB_CONNECTION_INFO.HostName.
apply-configuration might need ONLY the hub name, but HostName has the form [name].azure-devices.net.
Therefore, you might have to split the ConnectionString.HostName property and add a new property to that class called HubName, as in the sketch below.
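A minimal sketch of that split, assuming nothing beyond the values already shown in the tests above (the helper name is illustrative, not the project's actual API):
```python
# Sketch only: derive a short hub name from a HostName such as
# "testhub.azure-devices.net"; the function name is hypothetical.
def hub_name_from_host_name(host_name):
    return host_name.split(".")[0] if host_name else ""

assert hub_name_from_host_name("testhub.azure-devices.net") == "testhub"
```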
|
0.0
|
ce59bad1286bf650d442b2b7fbe16a3db676a497
|
[
"tests/test_connectionstring.py::test_valid_connectionstring",
"tests/test_connectionstring.py::test_valid_iothub_connectionstring",
"tests/test_connectionstring.py::test_valid_devicehub_connectionstring",
"tests/test_connectionstring.py::test_invalid_connectionstring",
"tests/test_connectionstring.py::test_invalid_devicehub_connectionstring"
] |
[
"tests/test_connectionstring.py::test_empty_connectionstring",
"tests/test_connectionstring.py::test_empty_iothub_connectionstring",
"tests/test_connectionstring.py::test_empty_device_connectionstring",
"tests/test_connectionstring.py::test_invalid_iothub_connectionstring"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-05-16 17:52:55+00:00
|
mit
| 59
|
|
podhmo__swagger-marshmallow-codegen-74
|
diff --git a/swagger_marshmallow_codegen/codegen/config.py b/swagger_marshmallow_codegen/codegen/config.py
index 429da80..56f8b2f 100644
--- a/swagger_marshmallow_codegen/codegen/config.py
+++ b/swagger_marshmallow_codegen/codegen/config.py
@@ -2,10 +2,11 @@ from __future__ import annotations
import typing_extensions as tx
-class ConfigDict(tx.TypedDict):
+class ConfigDict(tx.TypedDict, total=False):
schema: bool
input: bool
output: bool
emit_schema_even_primitive_type: bool
skip_header_comment: bool
+ header_comment: str
diff --git a/swagger_marshmallow_codegen/codegen/v2/codegen.py b/swagger_marshmallow_codegen/codegen/v2/codegen.py
index 568f432..ecf29f2 100644
--- a/swagger_marshmallow_codegen/codegen/v2/codegen.py
+++ b/swagger_marshmallow_codegen/codegen/v2/codegen.py
@@ -95,7 +95,9 @@ class SchemaWriter:
logger.debug(" nested: %s, %s", caller_name, field_class_name)
if opts:
kwargs = LazyFormat(", {}", kwargs)
- value = LazyFormat("{}(lambda: {}(){})", caller_name, field_class_name, kwargs)
+ value = LazyFormat(
+ "{}(lambda: {}(){})", caller_name, field_class_name, kwargs
+ )
else:
if caller_name == "fields.Nested":
caller_name = "fields.Field"
@@ -488,9 +490,14 @@ class Codegen:
def resolver(self) -> Resolver:
return self.accessor.resolver
- def write_header(self, c):
- c.im.stmt("# -*- coding:utf-8 -*-")
- c.im.stmt("# this is auto-generated by swagger-marshmallow-codegen")
+ def write_header(self, c, *, comment: t.Optional[str] = None):
+ if comment is None:
+ comment = """\
+# this is auto-generated by swagger-marshmallow-codegen
+from __future__ import annotations
+"""
+ for line in comment.splitlines():
+ c.im.stmt(line)
def write_import_(self, c):
c.from_(*self.schema_class_path.rsplit(":", 1))
@@ -509,7 +516,7 @@ class Codegen:
def codegen(self, d, ctx=None):
c = ctx or Context()
if not self.accessor.config.get("skip_header_comment", False):
- self.write_header(c)
+ self.write_header(c, comment=self.accessor.config.get("header_comment"))
c.m.sep()
self.write_import_(c)
self.write_body(c, d)
diff --git a/swagger_marshmallow_codegen/codegen/v3/codegen.py b/swagger_marshmallow_codegen/codegen/v3/codegen.py
index 4b8d214..0e27d70 100644
--- a/swagger_marshmallow_codegen/codegen/v3/codegen.py
+++ b/swagger_marshmallow_codegen/codegen/v3/codegen.py
@@ -1,1 +1,2 @@
from ..v2.codegen import Codegen
+__all__ = ["Codegen"]
diff --git a/swagger_marshmallow_codegen/dispatcher.py b/swagger_marshmallow_codegen/dispatcher.py
index a5252be..2bf21f1 100644
--- a/swagger_marshmallow_codegen/dispatcher.py
+++ b/swagger_marshmallow_codegen/dispatcher.py
@@ -22,7 +22,7 @@ TYPE_MAP = {
Pair(type="string", format=None): "marshmallow.fields:String",
Pair(type="boolean", format=None): "marshmallow.fields:Boolean",
Pair(type="string", format="uuid"): "marshmallow.fields:UUID",
- Pair(type="string", format="date-time"): "marshmallow.fields:DateTime",
+ Pair(type="string", format="date-time"): "marshmallow.fields:AwareDateTime",
Pair(type="string", format="date"): "marshmallow.fields:Date",
Pair(type="string", format="time"): "marshmallow.fields:Time",
Pair(type="string", format="email"): "marshmallow.fields:Email",
diff --git a/swagger_marshmallow_codegen/resolver.py b/swagger_marshmallow_codegen/resolver.py
index 844e9a2..50a350c 100644
--- a/swagger_marshmallow_codegen/resolver.py
+++ b/swagger_marshmallow_codegen/resolver.py
@@ -1,6 +1,5 @@
# -*- coding:utf-8 -*-
import logging
-import sys
from collections import OrderedDict
import dictknife
from .langhelpers import titleize, normalize
|
podhmo/swagger-marshmallow-codegen
|
6d5dcfa88e8882a293434e3c3fcbf4837fd21c7d
|
diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00default.py b/swagger_marshmallow_codegen/tests/legacy_dst/00default.py
index fc76808..090bfa1 100644
--- a/swagger_marshmallow_codegen/tests/legacy_dst/00default.py
+++ b/swagger_marshmallow_codegen/tests/legacy_dst/00default.py
@@ -10,7 +10,8 @@ class X(Schema):
string = fields.String(missing=lambda: 'default')
integer = fields.Integer(missing=lambda: 10)
boolean = fields.Boolean(missing=lambda: True)
- datetime = fields.DateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1))
+ datetime = fields.AwareDateTime(missing=lambda: datetime.datetime(2000, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc))
+ date = fields.Date(missing=lambda: datetime.date(2000, 1, 1))
object = fields.Nested(lambda: XObject(), missing=lambda: OrderedDict([('name', 'foo'), ('age', 20)]))
array = fields.List(fields.Integer(), missing=lambda: [1, 2, 3])
diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py b/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py
index c71e243..0890aac 100644
--- a/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py
+++ b/swagger_marshmallow_codegen/tests/legacy_dst/00empty.py
@@ -1,3 +1,4 @@
+# flake8: noqa
from marshmallow import (
Schema,
fields,
diff --git a/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py b/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py
index b8effe3..3b4e48e 100644
--- a/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py
+++ b/swagger_marshmallow_codegen/tests/legacy_dst/00paths.py
@@ -15,7 +15,7 @@ class Pet(Schema):
name = fields.String(required=True, description="Pet's name", validate=[Length(min=1, max=100, equal=None)])
animal_type = fields.String(required=True, description='Kind of animal', validate=[Length(min=1, max=None, equal=None)])
tags = fields.Field(description='Custom tags')
- created = fields.DateTime(description='Creation time', dump_only=True)
+ created = fields.AwareDateTime(description='Creation time', dump_only=True)
class PetsInput:
diff --git a/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml b/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml
index df6f0e9..cad6cef 100644
--- a/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml
+++ b/swagger_marshmallow_codegen/tests/legacy_src/00default.yaml
@@ -14,6 +14,10 @@ definitions:
type: string
format: date-time
default: 2000-01-01T01:01:01Z
+ date:
+ type: string
+ format: date
+ default: 2000-01-01
object:
type: object
properties:
diff --git a/swagger_marshmallow_codegen/tests/test_codegen_legacy.py b/swagger_marshmallow_codegen/tests/test_codegen_legacy.py
index b3f67b9..d87dbd0 100644
--- a/swagger_marshmallow_codegen/tests/test_codegen_legacy.py
+++ b/swagger_marshmallow_codegen/tests/test_codegen_legacy.py
@@ -8,51 +8,53 @@ here = pathlib.Path(__file__).parent
@pytest.mark.parametrize(
- "src_file, dst_file",
+ "src_file, dst_file, header_comment",
[
- ("./legacy_src/00person.yaml", "./legacy_dst/00person.py"),
- ("./legacy_src/01person.yaml", "./legacy_dst/01person.py"),
- ("./legacy_src/02person.yaml", "./legacy_dst/02person.py"),
- ("./legacy_src/03person.yaml", "./legacy_dst/03person.py"),
- ("./legacy_src/04person.yaml", "./legacy_dst/04person.py"),
- ("./legacy_src/05person.yaml", "./legacy_dst/05person.py"),
- ("./legacy_src/00commit.yaml", "./legacy_dst/00commit.py"),
- ("./legacy_src/01commit.yaml", "./legacy_dst/01commit.py"),
- ("./legacy_src/00emojis.yaml", "./legacy_dst/00emojis.py"),
- ("./legacy_src/00stat.yaml", "./legacy_dst/00stat.py"),
- ("./legacy_src/00default.yaml", "./legacy_dst/00default.py"),
- ("./legacy_src/00maximum.yaml", "./legacy_dst/00maximum.py"),
- ("./legacy_src/00length.yaml", "./legacy_dst/00length.py"),
- ("./legacy_src/00regex.yaml", "./legacy_dst/00regex.py"),
- ("./legacy_src/00enum.yaml", "./legacy_dst/00enum.py"),
- ("./legacy_src/00items.yaml", "./legacy_dst/00items.py"),
- ("./legacy_src/00readonly.yaml", "./legacy_dst/00readonly.py"),
- ("./legacy_src/00allOf.yaml", "./legacy_dst/00allOf.py"),
- ("./legacy_src/00allOf2.yaml", "./legacy_dst/00allOf2.py"),
- ("./legacy_src/01allOf2.yaml", "./legacy_dst/01allOf2.py"),
- ("./legacy_src/02allOf2.yaml", "./legacy_dst/02allOf2.py"),
- ("./legacy_src/00paths.yaml", "./legacy_dst/00paths.py"),
- ("./legacy_src/01paths.yaml", "./legacy_dst/01paths.py"),
- ("./legacy_src/02paths.yaml", "./legacy_dst/02paths.py"),
- ("./legacy_src/03paths.yaml", "./legacy_dst/03paths.py"),
- ("./legacy_src/00empty.yaml", "./legacy_dst/00empty.py"),
- ("./legacy_src/01empty.yaml", "./legacy_dst/01empty.py"),
+ ("./legacy_src/00person.yaml", "./legacy_dst/00person.py", ""),
+ ("./legacy_src/01person.yaml", "./legacy_dst/01person.py", ""),
+ ("./legacy_src/02person.yaml", "./legacy_dst/02person.py", ""),
+ ("./legacy_src/03person.yaml", "./legacy_dst/03person.py", ""),
+ ("./legacy_src/04person.yaml", "./legacy_dst/04person.py", ""),
+ ("./legacy_src/05person.yaml", "./legacy_dst/05person.py", ""),
+ ("./legacy_src/00commit.yaml", "./legacy_dst/00commit.py", ""),
+ ("./legacy_src/01commit.yaml", "./legacy_dst/01commit.py", ""),
+ ("./legacy_src/00emojis.yaml", "./legacy_dst/00emojis.py", ""),
+ ("./legacy_src/00stat.yaml", "./legacy_dst/00stat.py", ""),
+ ("./legacy_src/00default.yaml", "./legacy_dst/00default.py", ""),
+ ("./legacy_src/00maximum.yaml", "./legacy_dst/00maximum.py", ""),
+ ("./legacy_src/00length.yaml", "./legacy_dst/00length.py", ""),
+ ("./legacy_src/00regex.yaml", "./legacy_dst/00regex.py", ""),
+ ("./legacy_src/00enum.yaml", "./legacy_dst/00enum.py", ""),
+ ("./legacy_src/00items.yaml", "./legacy_dst/00items.py", ""),
+ ("./legacy_src/00readonly.yaml", "./legacy_dst/00readonly.py", ""),
+ ("./legacy_src/00allOf.yaml", "./legacy_dst/00allOf.py", ""),
+ ("./legacy_src/00allOf2.yaml", "./legacy_dst/00allOf2.py", ""),
+ ("./legacy_src/01allOf2.yaml", "./legacy_dst/01allOf2.py", ""),
+ ("./legacy_src/02allOf2.yaml", "./legacy_dst/02allOf2.py", ""),
+ ("./legacy_src/00paths.yaml", "./legacy_dst/00paths.py", ""),
+ ("./legacy_src/01paths.yaml", "./legacy_dst/01paths.py", ""),
+ ("./legacy_src/02paths.yaml", "./legacy_dst/02paths.py", ""),
+ ("./legacy_src/03paths.yaml", "./legacy_dst/03paths.py", ""),
+ ("./legacy_src/00empty.yaml", "./legacy_dst/00empty.py", "# flake8: noqa"),
+ ("./legacy_src/01empty.yaml", "./legacy_dst/01empty.py", ""),
(
"./legacy_src/00list_with_options.yaml",
"./legacy_dst/00list_with_options.py",
+ "",
),
- ("./legacy_src/00reserved.yaml", "./legacy_dst/00reserved.py"),
- ("./legacy_src/00typearray.yaml", "./legacy_dst/00typearray.py"),
- ("./legacy_src/00additional.yaml", "./legacy_dst/00additional.py"),
- ("./legacy_src/01additional.yaml", "./legacy_dst/01additional.py"),
- ("./legacy_src/00nullable.yaml", "./legacy_dst/00nullable.py"),
- ("./legacy_src/00primitiveapi.yaml", "./legacy_dst/00primitiveapi.py"),
+ ("./legacy_src/00reserved.yaml", "./legacy_dst/00reserved.py", ""),
+ ("./legacy_src/00typearray.yaml", "./legacy_dst/00typearray.py", ""),
+ ("./legacy_src/00additional.yaml", "./legacy_dst/00additional.py", ""),
+ ("./legacy_src/01additional.yaml", "./legacy_dst/01additional.py", ""),
+ ("./legacy_src/00nullable.yaml", "./legacy_dst/00nullable.py", ""),
+ ("./legacy_src/00primitiveapi.yaml", "./legacy_dst/00primitiveapi.py", ""),
# ("./legacy_src/00patternProperties.yaml", "./legacy_dst/00patternProperties.py"), not supported yet
],
)
-def test_v2(
- src_file,
- dst_file,
+def test(
+ src_file: str,
+ dst_file: str,
+ header_comment: str,
):
from swagger_marshmallow_codegen.lifting import lifting_definition
from swagger_marshmallow_codegen.codegen import Context
@@ -62,9 +64,8 @@ def test_v2(
get_codegen().codegen(
lifting_definition(d),
- {"schema": True, "input": True, "output": True},
+ {"schema": True, "input": True, "output": True, "header_comment": header_comment},
ctx=ctx,
- test=True,
)
expected = load_dstfile(dst_file, here=here).rstrip("\n")
|
CI is broken; marshmallow's DateTime field handling has changed.
refs https://github.com/marshmallow-code/marshmallow/issues/1234
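A hedged sketch of the behavioural difference behind the fix, assuming marshmallow 3.x where `fields.AwareDateTime` was introduced (the schema and default are illustrative, mirroring the regenerated `00default.py` above):
```python
# Sketch only: the generator now emits AwareDateTime with a tz-aware default
# instead of DateTime with a naive default (see legacy_dst/00default.py above).
import datetime
from marshmallow import Schema, fields

class X(Schema):
    datetime = fields.AwareDateTime(
        missing=lambda: datetime.datetime(
            2000, 1, 1, 1, 1, 1, tzinfo=datetime.timezone.utc
        )
    )
```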
|
0.0
|
6d5dcfa88e8882a293434e3c3fcbf4837fd21c7d
|
[
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00person.yaml-./legacy_dst/00person.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01person.yaml-./legacy_dst/01person.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02person.yaml-./legacy_dst/02person.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/03person.yaml-./legacy_dst/03person.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/04person.yaml-./legacy_dst/04person.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/05person.yaml-./legacy_dst/05person.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00commit.yaml-./legacy_dst/00commit.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01commit.yaml-./legacy_dst/01commit.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00emojis.yaml-./legacy_dst/00emojis.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00stat.yaml-./legacy_dst/00stat.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00maximum.yaml-./legacy_dst/00maximum.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00length.yaml-./legacy_dst/00length.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00regex.yaml-./legacy_dst/00regex.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00enum.yaml-./legacy_dst/00enum.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00items.yaml-./legacy_dst/00items.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00readonly.yaml-./legacy_dst/00readonly.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00allOf.yaml-./legacy_dst/00allOf.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00allOf2.yaml-./legacy_dst/00allOf2.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01allOf2.yaml-./legacy_dst/01allOf2.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02allOf2.yaml-./legacy_dst/02allOf2.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00paths.yaml-./legacy_dst/00paths.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01paths.yaml-./legacy_dst/01paths.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/02paths.yaml-./legacy_dst/02paths.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/03paths.yaml-./legacy_dst/03paths.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00empty.yaml-./legacy_dst/00empty.py-#",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01empty.yaml-./legacy_dst/01empty.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00list_with_options.yaml-./legacy_dst/00list_with_options.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00reserved.yaml-./legacy_dst/00reserved.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00typearray.yaml-./legacy_dst/00typearray.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00additional.yaml-./legacy_dst/00additional.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/01additional.yaml-./legacy_dst/01additional.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00nullable.yaml-./legacy_dst/00nullable.py-]",
"swagger_marshmallow_codegen/tests/test_codegen_legacy.py::test[./legacy_src/00primitiveapi.yaml-./legacy_dst/00primitiveapi.py-]"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-11-03 10:22:46+00:00
|
mit
| 4,619
|
|
meejah__txtorcon-390
|
diff --git a/docs/releases.rst b/docs/releases.rst
index ea32618..517a41c 100644
--- a/docs/releases.rst
+++ b/docs/releases.rst
@@ -18,6 +18,7 @@ See also :ref:`api_stability`.
`git main <https://github.com/meejah/txtorcon>`_ *will likely become v23.6.0*
* Fix test-failures on Python 3.12
+ * Particular GETINFO hanging (`#389 <https://github.com/meejah/txtorcon/issues/389>`_)
v23.5.0
diff --git a/txtorcon/torcontrolprotocol.py b/txtorcon/torcontrolprotocol.py
index 887bd0a..e882160 100644
--- a/txtorcon/torcontrolprotocol.py
+++ b/txtorcon/torcontrolprotocol.py
@@ -232,6 +232,12 @@ class TorControlProtocol(LineOnlyReceiver):
:class:`txtorcon.TorState`, which is also the place to go if you
wish to add your own stream or circuit listeners.
"""
+ # override Twisted's LineOnlyReceiver maximum line-length. At
+ # least "GETINFO md/id/X" for some Xse exceeds 16384 (2**14, the
+ # default) and thus causes the control connection to
+ # fail. control.c defines MAX_COMMAND_LINE_LENGTH as 1024*1024 so
+ # we use that
+ MAX_LENGTH = 2 ** 20
def __init__(self, password_function=None):
"""
@@ -274,11 +280,6 @@ class TorControlProtocol(LineOnlyReceiver):
:func:`when_disconnected` instead)
"""
- self._when_disconnected = SingleObserver()
- """
- Internal use. A :class:`SingleObserver` for when_disconnected()
- """
-
self._when_disconnected = SingleObserver()
"""
Private. See :func:`.when_disconnected`
@@ -356,7 +357,7 @@ class TorControlProtocol(LineOnlyReceiver):
self.stop_debug()
def start_debug(self):
- self.debuglog = open('txtorcon-debug.log', 'w')
+ self.debuglog = open('txtorcon-debug.log', 'wb')
def stop_debug(self):
def noop(*args, **kw):
@@ -692,10 +693,14 @@ class TorControlProtocol(LineOnlyReceiver):
def connectionLost(self, reason):
"Protocol API"
txtorlog.msg('connection terminated: ' + str(reason))
- if reason.check(ConnectionDone):
- self._when_disconnected.fire(self)
- else:
- self._when_disconnected.fire(reason)
+ self._when_disconnected.fire(
+ Failure(
+ TorDisconnectError(
+ text="Tor connection terminated",
+ error=reason,
+ )
+ )
+ )
# ...and this is why we don't do on_disconnect = Deferred() :(
# and instead should have had on_disconnect() method that
@@ -712,8 +717,10 @@ class TorControlProtocol(LineOnlyReceiver):
else:
self.on_disconnect.errback(reason)
self.on_disconnect = None
- self._when_disconnected.fire(self)
+
outstanding = [self.command] + self.commands if self.command else self.commands
+ self.command = None
+ self.defer = None
for d, cmd, cmd_arg in outstanding:
if not d.called:
d.errback(
@@ -754,6 +761,10 @@ class TorControlProtocol(LineOnlyReceiver):
if len(self.commands):
self.command = self.commands.pop(0)
(d, cmd, cmd_arg) = self.command
+
+ if self._when_disconnected.already_fired(d):
+ return
+
self.defer = d
self.debuglog.write(cmd + b'\n')
diff --git a/txtorcon/util.py b/txtorcon/util.py
index 4b772e3..406a0f5 100644
--- a/txtorcon/util.py
+++ b/txtorcon/util.py
@@ -473,6 +473,19 @@ class SingleObserver(object):
self._observers = []
self._fired = self._NotFired
+ def has_fired(self):
+ return self._fired is not self._NotFired
+
+ def already_fired(self, d):
+ """
+ If we have already fired, callback `d` with our result.
+ :returns bool: True if we already fired, False otherwise
+ """
+ if self.has_fired():
+ d.callback(self._fired)
+ return True
+ return False
+
def when_fired(self):
d = defer.Deferred()
if self._fired is not self._NotFired:
|
meejah/txtorcon
|
c0c98ff4bb888b9e1e2b5b53e6a0ce5a8be3ba69
|
diff --git a/test/test_torcontrolprotocol.py b/test/test_torcontrolprotocol.py
index 23ddeec..e15bdf0 100644
--- a/test/test_torcontrolprotocol.py
+++ b/test/test_torcontrolprotocol.py
@@ -226,7 +226,7 @@ class DisconnectionTests(unittest.TestCase):
it_was_called.yes = False
d = self.protocol.when_disconnected()
- d.addCallback(it_was_called)
+ d.addBoth(it_was_called)
f = failure.Failure(error.ConnectionDone("It's all over"))
self.protocol.connectionLost(f)
self.assertTrue(it_was_called.yes)
@@ -284,6 +284,31 @@ class DisconnectionTests(unittest.TestCase):
self.protocol.connectionLost(f)
self.assertEqual(it_was_called.count, 2)
+ def test_disconnect_subsequent_commands(self):
+ """
+ commands issued after disconnect should errback
+ """
+
+ def it_was_called(f):
+ str(f)
+ it_was_called.count += 1
+ return None
+ it_was_called.count = 0
+
+ # one outstanding command
+ d0 = self.protocol.queue_command("some command0")
+ d0.addErrback(it_was_called)
+ self.protocol.on_disconnect.addErrback(lambda _: None)
+
+ f = failure.Failure(RuntimeError("The thing didn't do the stuff."))
+ self.protocol.connectionLost(f)
+
+ # one command issued _after_ we've disconnected
+ d1 = self.protocol.queue_command("some command1")
+ d1.addErrback(it_was_called)
+
+ self.assertEqual(it_was_called.count, 2)
+
class ProtocolTests(unittest.TestCase):
|
When the tor process exits unexpectedly, tor.protocol.get_info hangs
Here is a minimal reproduction for this issue:
```python
from twisted.internet.task import react
from twisted.internet.defer import ensureDeferred
import txtorcon
EXIT_RELAY_FP = [
'130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9', # this triggers the getinfo fail
'127F6358F68FFB7E437DBA51D6D4DAC47B9F78A7',
]
async def main(reactor):
try:
tor = await txtorcon.launch(
reactor,
kill_on_stderr=False,
progress_updates=lambda x, y, z: print(f"{x}%: {y} - {z}"),
)
except Exception as exc:
print(f"FAILED to start tor {exc}")
return
state = await tor.create_state()
for exit_fp in EXIT_RELAY_FP:
print(f"doing {exit_fp}")
try:
print("calling GETINFO")
info = await tor.protocol.get_info("md/id/" + exit_fp)
print(f"got {info}")
except Exception as exc:
print(f"FAILED to get info for {exit_fp} {exc}")
@react
def _main(reactor):
return ensureDeferred(main(reactor))
```
You can see that the last log lines are:
```
doing 130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9
calling GETINFO
FAILED to get info for 130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9 Tor unexpectedly disconnected while running: GETINFO md/id/130CFCF38BA3327E3001A1DB2A4B5ACBDAE248D9
doing 127F6358F68FFB7E437DBA51D6D4DAC47B9F78A7
calling GETINFO
Unhandled Error
Traceback (most recent call last):
Failure: builtins.RuntimeError: Tor exited with error-code 0
```
This is where we hang, waiting on the `tor.protocol.get_info("md/id/" + exit_fp)` call.
I would expect to either get an errback, because the process is dead, or to have some other way to tell that I should not be calling it.
It would also be nice to be able to somehow know that tor has exited this way, as it's currently not possible to listen for a "tor exited" event.
This is probably also a tor bug, as the tor process should not exit when issuing this specific get_info command.
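A hedged sketch of how a caller can at least observe the disconnect explicitly with the existing `when_disconnected()` API (the surrounding coroutine is illustrative, not a fix for the hang itself):
```python
# Sketch only: log when the control connection goes away so a stuck GETINFO
# is easier to diagnose; when_disconnected() and get_info() are txtorcon APIs,
# the rest is illustrative.
async def get_info_verbose(tor, key):
    tor.protocol.when_disconnected().addBoth(
        lambda arg: print(f"control connection terminated: {arg}")
    )
    return await tor.protocol.get_info(key)
```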
|
0.0
|
c0c98ff4bb888b9e1e2b5b53e6a0ce5a8be3ba69
|
[
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_subsequent_commands"
] |
[
"test/test_torcontrolprotocol.py::InterfaceTests::test_implements",
"test/test_torcontrolprotocol.py::InterfaceTests::test_object_implements",
"test/test_torcontrolprotocol.py::LogicTests::test_set_conf_wrong_args",
"test/test_torcontrolprotocol.py::FactoryTests::test_create",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_cookie",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_no_password",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_null",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password_deferred",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password_deferred_but_no_password",
"test/test_torcontrolprotocol.py::AuthenticationTests::test_authenticate_password_not_bytes",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_callback",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_errback",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_disconnect_outstanding_commands",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_when_disconnect",
"test/test_torcontrolprotocol.py::DisconnectionTests::test_when_disconnect_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_650_after_authenticate",
"test/test_torcontrolprotocol.py::ProtocolTests::test_addevent",
"test/test_torcontrolprotocol.py::ProtocolTests::test_async",
"test/test_torcontrolprotocol.py::ProtocolTests::test_async_multiline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_cookie_without_reading",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_dont_send_cookiefile",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_fail",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_no_auth_line",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_not_enough_cookie_data",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_not_enough_safecookie_data",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_password_when_cookie_unavailable",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_password_when_safecookie_unavailable",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_safecookie",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_safecookie_wrong_hash",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_unexisting_cookie_file",
"test/test_torcontrolprotocol.py::ProtocolTests::test_authenticate_unexisting_safecookie_file",
"test/test_torcontrolprotocol.py::ProtocolTests::test_bootstrap_callback",
"test/test_torcontrolprotocol.py::ProtocolTests::test_bootstrap_tor_does_not_support_signal_names",
"test/test_torcontrolprotocol.py::ProtocolTests::test_continuation_line",
"test/test_torcontrolprotocol.py::ProtocolTests::test_debug",
"test/test_torcontrolprotocol.py::ProtocolTests::test_dot",
"test/test_torcontrolprotocol.py::ProtocolTests::test_eventlistener",
"test/test_torcontrolprotocol.py::ProtocolTests::test_eventlistener_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getconf",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getconf_raw",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getconf_single",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_for_descriptor",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_incremental",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_incremental_continuation",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_multiline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_one_line",
"test/test_torcontrolprotocol.py::ProtocolTests::test_getinfo_single",
"test/test_torcontrolprotocol.py::ProtocolTests::test_minus_line_no_command",
"test/test_torcontrolprotocol.py::ProtocolTests::test_multiline_plus",
"test/test_torcontrolprotocol.py::ProtocolTests::test_multiline_plus_embedded_equals",
"test/test_torcontrolprotocol.py::ProtocolTests::test_newdesc",
"test/test_torcontrolprotocol.py::ProtocolTests::test_notify_after_getinfo",
"test/test_torcontrolprotocol.py::ProtocolTests::test_notify_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_plus_line_no_command",
"test/test_torcontrolprotocol.py::ProtocolTests::test_quit",
"test/test_torcontrolprotocol.py::ProtocolTests::test_remove_eventlistener",
"test/test_torcontrolprotocol.py::ProtocolTests::test_remove_eventlistener_multiple",
"test/test_torcontrolprotocol.py::ProtocolTests::test_response_with_no_request",
"test/test_torcontrolprotocol.py::ProtocolTests::test_setconf",
"test/test_torcontrolprotocol.py::ProtocolTests::test_setconf_multi",
"test/test_torcontrolprotocol.py::ProtocolTests::test_setconf_with_space",
"test/test_torcontrolprotocol.py::ProtocolTests::test_signal",
"test/test_torcontrolprotocol.py::ProtocolTests::test_signal_error",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_broadcast_no_code",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_broadcast_unknown_code",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_continuation",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_is_finish",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_multiline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_statemachine_singleline",
"test/test_torcontrolprotocol.py::ProtocolTests::test_twocommands",
"test/test_torcontrolprotocol.py::ParseTests::test_circuit_status",
"test/test_torcontrolprotocol.py::ParseTests::test_default_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_keywords_mutli_equals",
"test/test_torcontrolprotocol.py::ParseTests::test_multientry_keywords_2",
"test/test_torcontrolprotocol.py::ParseTests::test_multientry_keywords_3",
"test/test_torcontrolprotocol.py::ParseTests::test_multientry_keywords_4",
"test/test_torcontrolprotocol.py::ParseTests::test_multiline_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_multiline_keywords_with_spaces",
"test/test_torcontrolprotocol.py::ParseTests::test_network_status",
"test/test_torcontrolprotocol.py::ParseTests::test_unquoted_keywords",
"test/test_torcontrolprotocol.py::ParseTests::test_unquoted_keywords_empty",
"test/test_torcontrolprotocol.py::ParseTests::test_unquoted_keywords_singlequote"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-29 22:24:50+00:00
|
mit
| 3,852
|
|
alephdata__servicelayer-60
|
diff --git a/servicelayer/worker.py b/servicelayer/worker.py
index 939a3a3..f2a45b3 100644
--- a/servicelayer/worker.py
+++ b/servicelayer/worker.py
@@ -1,5 +1,6 @@
import signal
import logging
+import sys
from threading import Thread
from banal import ensure_list
from abc import ABC, abstractmethod
@@ -10,7 +11,13 @@ from servicelayer.cache import get_redis
from servicelayer.util import unpack_int
log = logging.getLogger(__name__)
+
+# When a worker thread is not blocking, it has to exit if no task is available.
+# `TASK_FETCH_RETRY`` determines how many times the worker thread will try to fetch
+# a task before quitting.
+# `INTERVAL`` determines the interval in seconds between each retry.
INTERVAL = 2
+TASK_FETCH_RETRY = 60 / INTERVAL
class Worker(ABC):
@@ -23,8 +30,10 @@ class Worker(ABC):
self.exit_code = 0
def _handle_signal(self, signal, frame):
- log.warning("Shutting down worker (signal %s)", signal)
+ log.warning(f"Shutting down worker (signal {signal})")
self.exit_code = int(signal)
+ # Exit eagerly without waiting for current task to finish running
+ sys.exit(self.exit_code)
def handle_safe(self, task):
try:
@@ -56,16 +65,25 @@ class Worker(ABC):
task.stage.queue(task.payload, task.context)
def process(self, blocking=True, interval=INTERVAL):
- while True:
+ retries = 0
+ while retries <= TASK_FETCH_RETRY:
if self.exit_code > 0:
+ log.info("Worker thread is exiting")
return self.exit_code
self.periodic()
stages = self.get_stages()
task = Stage.get_task(self.conn, stages, timeout=interval)
if task is None:
if not blocking:
- return self.exit_code
+ # If we get a null task, retry to fetch a task a bunch of times before quitting
+ if retries >= TASK_FETCH_RETRY:
+ log.info("Worker thread is exiting")
+ return self.exit_code
+ else:
+ retries += 1
continue
+ # when we get a good task, reset retry count
+ retries = 0
self.handle_safe(task)
def sync(self):
diff --git a/setup.py b/setup.py
index 88a915d..79b586f 100644
--- a/setup.py
+++ b/setup.py
@@ -30,7 +30,7 @@ setup(
install_requires=[
"banal >= 1.0.1, <2.0.0",
"normality >= 2.1.1, <3.0.0",
- "fakeredis == 1.7.0",
+ "fakeredis == 1.7.1",
"sqlalchemy >= 1.3",
"structlog >= 20.2.0, < 22.0.0",
"colorama >= 0.4.4, < 1.0.0",
@@ -39,7 +39,7 @@ setup(
"amazon": ["boto3 >= 1.11.9, <2.0.0"],
"google": [
"grpcio >= 1.32.0, <2.0.0",
- "google-cloud-storage >= 1.31.0, <2.0.0",
+ "google-cloud-storage >= 1.31.0, < 3.0.0",
],
"dev": [
"twine",
|
alephdata/servicelayer
|
ebde2a96658c9ecda9a7f9048cd106ef580dda5b
|
diff --git a/tests/test_worker.py b/tests/test_worker.py
index 60e14a9..f4af193 100644
--- a/tests/test_worker.py
+++ b/tests/test_worker.py
@@ -1,4 +1,5 @@
from unittest import TestCase
+import pytest
from servicelayer.cache import get_fakeredis
from servicelayer.jobs import Job
@@ -34,8 +35,6 @@ class WorkerTest(TestCase):
assert job.is_done()
assert worker.exit_code == 0, worker.exit_code
assert worker.test_done == 1, worker.test_done
- worker._handle_signal(5, None)
- assert worker.exit_code == 5, worker.exit_code
worker.retry(task)
worker.run(blocking=False)
assert job.is_done()
@@ -45,3 +44,9 @@ class WorkerTest(TestCase):
worker.run(blocking=False)
assert job.is_done()
assert worker.exit_code == 0, worker.exit_code
+ try:
+ worker._handle_signal(5, None)
+ except SystemExit as exc:
+ assert exc.code == 5, exc.code
+ with pytest.raises(SystemExit) as exc: # noqa
+ worker._handle_signal(5, None)
|
Worker doesn't exit on KeyboardInterrupt when running in multi-threaded mode
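A minimal sketch of the eager-exit approach the patch above takes; the handler and signal choices are illustrative:
```python
# Sketch only: exit the process from the signal handler instead of waiting for
# the worker loop to notice exit_code (mirrors Worker._handle_signal above).
import signal
import sys

def handle_signal(signum, frame):
    print(f"Shutting down worker (signal {signum})")
    sys.exit(int(signum))

signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
```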
|
0.0
|
ebde2a96658c9ecda9a7f9048cd106ef580dda5b
|
[
"tests/test_worker.py::WorkerTest::test_run"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-02-17 11:13:08+00:00
|
mit
| 1,018
|
|
spyder-ide__spyder-kernels-86
|
diff --git a/spyder_kernels/customize/spydercustomize.py b/spyder_kernels/customize/spydercustomize.py
index 09247ed..64aa18f 100644
--- a/spyder_kernels/customize/spydercustomize.py
+++ b/spyder_kernels/customize/spydercustomize.py
@@ -534,8 +534,13 @@ class UserModuleReloader(object):
def __init__(self, namelist=None, pathlist=None):
if namelist is None:
namelist = []
- spy_modules = ['sitecustomize', 'spyder', 'spyderplugins']
+
+ # Spyder modules
+ spy_modules = ['spyder_kernels']
+
+ # Matplotlib modules
mpl_modules = ['matplotlib', 'tkinter', 'Tkinter']
+
# Add other, necessary modules to the UMR blacklist
# astropy: see issue 6962
# pytorch: see issue 7041
@@ -550,12 +555,19 @@ class UserModuleReloader(object):
if pathlist is None:
pathlist = []
- self.pathlist = pathlist
+ self.pathlist = self.create_pathlist(pathlist)
+
+ # List of previously loaded modules
self.previous_modules = list(sys.modules.keys())
- @property
- def skip_paths(self):
- """Python library paths to be skipped from module reloading."""
+ # List of module names to reload
+ self.modnames_to_reload = []
+
+ def create_pathlist(self, initial_pathlist):
+ """
+ Add to pathlist Python library paths to be skipped from module
+ reloading.
+ """
try:
paths = sysconfig.get_paths()
lib_paths = [paths['stdlib'],
@@ -563,22 +575,28 @@ class UserModuleReloader(object):
paths['scripts'],
paths['data']]
- return lib_paths
+ return initial_pathlist + lib_paths
except Exception:
- return []
+ return initial_pathlist
- def is_module_blacklisted(self, modname, modpath):
+ def is_module_reloadable(self, module, modname):
+ """Decide if a module is reloadable or not."""
if HAS_CYTHON:
# Don't return cached inline compiled .PYX files
return True
- for path in [sys.prefix]+self.pathlist:
- if modpath.startswith(path):
- return True
else:
- return set(modname.split('.')) & set(self.namelist)
+ if (self.is_module_in_pathlist(module) or
+ self.is_module_in_namelist(modname)):
+ return False
+ else:
+ return True
+
+ def is_module_in_namelist(self, modname):
+ """Decide if a module can be reloaded or not according to its name."""
+ return set(modname.split('.')) & set(self.namelist)
- def is_module_reloadable(self, module):
- """Decide if a module can be reloaded or not."""
+ def is_module_in_pathlist(self, module):
+ """Decide if a module can be reloaded or not according to its path."""
modpath = getattr(module, '__file__', None)
# Skip module according to different criteria
@@ -586,12 +604,12 @@ class UserModuleReloader(object):
# *module* is a C module that is statically linked into the
# interpreter. There is no way to know its path, so we
# choose to ignore it.
- return False
- elif any([p in modpath for p in self.skip_paths]):
+ return True
+ elif any([p in modpath for p in self.pathlist]):
# We don't want to reload modules that belong to the
# standard library or installed to site-packages,
# just modules created by the user.
- return False
+ return True
elif not os.name == 'nt':
# Module paths containing the strings below can be ihherited
# from the default Linux installation or Homebrew in a
@@ -601,39 +619,49 @@ class UserModuleReloader(object):
r'^/usr/.*/dist-packages/.*',
r'^/Library/.*'
]
+
if [p for p in patterns if re.search(p, modpath)]:
- return False
- else:
return True
+ else:
+ return False
else:
- return True
+ return False
def run(self, verbose=False):
"""
- Del user modules to force Python to deeply reload them
+ Delete user modules to force Python to deeply reload them
Do not del modules which are considered as system modules, i.e.
modules installed in subdirectories of Python interpreter's binary
Do not del C modules
"""
- log = []
+ self.modnames_to_reload = []
for modname, module in list(sys.modules.items()):
if modname not in self.previous_modules:
# Decide if a module can be reloaded or not
- if not self.is_module_reloadable(module):
- continue
-
- # Reload module
- if not self.is_module_blacklisted(modname, modpath):
- log.append(modname)
+ if self.is_module_reloadable(module, modname):
+ self.modnames_to_reload.append(modname)
del sys.modules[modname]
+ else:
+ continue
# Report reloaded modules
- if verbose and log:
+ if verbose and self.modnames_to_reload:
+ modnames = self.modnames_to_reload
_print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"\
- % ("Reloaded modules", ": "+", ".join(log)))
+ % ("Reloaded modules", ": "+", ".join(modnames)))
-__umr__ = None
+
+if os.environ.get("SPY_UMR_ENABLED", "").lower() == "true":
+ namelist = os.environ.get("SPY_UMR_NAMELIST", None)
+ if namelist is not None:
+ try:
+ namelist = namelist.split(',')
+ except Exception:
+ namelist = None
+ __umr__ = UserModuleReloader(namelist=namelist)
+else:
+ __umr__ = None
#==============================================================================
@@ -715,16 +743,10 @@ def runfile(filename, args=None, wdir=None, namespace=None, post_mortem=False):
# UnicodeError, TypeError --> eventually raised in Python 2
# AttributeError --> systematically raised in Python 3
pass
- global __umr__
- if os.environ.get("SPY_UMR_ENABLED", "").lower() == "true":
- if __umr__ is None:
- namelist = os.environ.get("SPY_UMR_NAMELIST", None)
- if namelist is not None:
- namelist = namelist.split(',')
- __umr__ = UserModuleReloader(namelist=namelist)
- else:
- verbose = os.environ.get("SPY_UMR_VERBOSE", "").lower() == "true"
- __umr__.run(verbose=verbose)
+
+ if __umr__ is not None:
+ verbose = os.environ.get("SPY_UMR_VERBOSE", "").lower() == "true"
+ __umr__.run(verbose=verbose)
if args is not None and not isinstance(args, basestring):
raise TypeError("expected a character buffer object")
if namespace is None:
|
spyder-ide/spyder-kernels
|
855acc7006fa1a61c03ffda0e379a532a174b540
|
diff --git a/spyder_kernels/customize/tests/test_spydercustomize.py b/spyder_kernels/customize/tests/test_spydercustomize.py
index e478889..771d089 100644
--- a/spyder_kernels/customize/tests/test_spydercustomize.py
+++ b/spyder_kernels/customize/tests/test_spydercustomize.py
@@ -19,29 +19,61 @@ from spyder_kernels.customize.spydercustomize import UserModuleReloader
from spyder_kernels.py3compat import to_text_string
-def test_umr_skip_libmodules(tmpdir):
- """Test that UMR skips library modules and reloads user modules."""
- umr = UserModuleReloader()
-
- # Don't reload stdlib modules
- import xml
- assert umr.is_module_reloadable(xml) == False
-
- # Don't reload third-party modules
- import numpy
- assert umr.is_module_reloadable(numpy) == False
-
- # Reload user modules
[email protected]
+def user_module(tmpdir):
+ """Create a simple module in tmpdir as an example of a user module."""
sys.path.append(to_text_string(tmpdir))
modfile = tmpdir.mkdir('foo').join('bar.py')
code = """
- def square(x):
- return x**2
+def square(x):
+ return x**2
"""
modfile.write(code)
init_file = tmpdir.join('foo').join('__init__.py')
init_file.write('#')
+
+def test_umr_run(user_module):
+ """Test that UMR's run method is working correctly."""
+ umr = UserModuleReloader()
+
+ from foo.bar import square
+ umr.run(verbose=True)
+ umr.modnames_to_reload == ['foo', 'foo.bar']
+
+
+def test_umr_previous_modules(user_module):
+ """Test that UMR's previos_modules is working as expected."""
+ umr = UserModuleReloader()
+
+ import foo
+ assert 'IPython' in umr.previous_modules
+ assert 'foo' not in umr.previous_modules
+
+
+def test_umr_namelist():
+ """Test that the UMR skips modules according to its name."""
+ umr = UserModuleReloader()
+
+ assert umr.is_module_in_namelist('tensorflow')
+ assert umr.is_module_in_namelist('pytorch')
+ assert umr.is_module_in_namelist('spyder_kernels')
+ assert not umr.is_module_in_namelist('foo')
+
+
+def test_umr_pathlist(user_module):
+ """Test that the UMR skips modules according to its path."""
+ umr = UserModuleReloader()
+
+ # Don't reload stdlib modules
+ import xml
+ assert umr.is_module_in_pathlist(xml)
+
+ # Don't reload third-party modules
+ import numpy
+ assert umr.is_module_in_pathlist(numpy)
+
+ # Reload user modules
import foo
- assert umr.is_module_reloadable(foo)
+ assert umr.is_module_in_pathlist(foo) == False
|
NameError: name 'modpath' is not defined
Commit 31694ab74d9c5494a2c6054d2aaeee05cfc9ec15 introduced a bug when running a file a second time.
I'm using the latest `spyder-kernels` on master with the latest `spyder` also on master.
Windows 10, Python 3.7.1 64-bit
```python
Traceback (most recent call last):
File "<ipython-input-3-ec616f24c2fa>", line 1, in <module>
runfile('C:/Users/User/OneDrive/INRS/2017 - Projet INRS PACC/Analyses Baro/calcul_fft.py', wdir='C:/Users/User/OneDrive/INRS/2017 - Projet INRS PACC/Analyses Baro')
File "C:\Users\User\spyder-kernels\spyder_kernels\customize\spydercustomize.py", line 732, in runfile
run_umr()
File "C:\Users\User\spyder-kernels\spyder_kernels\customize\spydercustomize.py", line 716, in run_umr
__umr__.run(verbose=verbose)
File "C:\Users\User\spyder-kernels\spyder_kernels\customize\spydercustomize.py", line 627, in run
if not self.is_module_blacklisted(modname, modpath):
NameError: name 'modpath' is not defined
```
|
0.0
|
855acc7006fa1a61c03ffda0e379a532a174b540
|
[
"spyder_kernels/customize/tests/test_spydercustomize.py::test_umr_run",
"spyder_kernels/customize/tests/test_spydercustomize.py::test_umr_previous_modules",
"spyder_kernels/customize/tests/test_spydercustomize.py::test_umr_namelist"
] |
[] |
{
"failed_lite_validators": [
"has_git_commit_hash",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-06 17:05:39+00:00
|
mit
| 5,694
|
|
barrust__pyprobables-115
|
diff --git a/probables/blooms/bloom.py b/probables/blooms/bloom.py
index 912cc92..1da0311 100644
--- a/probables/blooms/bloom.py
+++ b/probables/blooms/bloom.py
@@ -315,21 +315,9 @@ class BloomFilter:
with open(filename, "w", encoding="utf-8") as file:
print(f"/* BloomFilter Export of a {bloom_type} */", file=file)
print("#include <inttypes.h>", file=file)
- print(
- "const uint64_t estimated_elements = ",
- self.estimated_elements,
- ";",
- sep="",
- file=file,
- )
+ print("const uint64_t estimated_elements = ", self.estimated_elements, ";", sep="", file=file)
print("const uint64_t elements_added = ", self.elements_added, ";", sep="", file=file)
- print(
- "const float false_positive_rate = ",
- self.false_positive_rate,
- ";",
- sep="",
- file=file,
- )
+ print("const float false_positive_rate = ", self.false_positive_rate, ";", sep="", file=file)
print("const uint64_t number_bits = ", self.number_bits, ";", sep="", file=file)
print("const unsigned int number_hashes = ", self.number_hashes, ";", sep="", file=file)
print("const unsigned char bloom[] = {", *data, "};", sep="\n", file=file)
diff --git a/probables/quotientfilter/quotientfilter.py b/probables/quotientfilter/quotientfilter.py
index 3411954..7f5fce9 100644
--- a/probables/quotientfilter/quotientfilter.py
+++ b/probables/quotientfilter/quotientfilter.py
@@ -4,7 +4,7 @@
"""
from array import array
-from typing import Optional
+from typing import Iterator, List, Optional
from probables.hashes import KeyT, SimpleHashT, fnv_1a_32
from probables.utilities import Bitarray
@@ -15,6 +15,7 @@ class QuotientFilter:
Args:
quotient (int): The size of the quotient to use
+ auto_expand (bool): Automatically expand or not
hash_function (function): Hashing strategy function to use `hf(key, number)`
Returns:
QuotientFilter: The initialized filter
@@ -35,18 +36,27 @@ class QuotientFilter:
"_is_continuation",
"_is_shifted",
"_filter",
+ "_max_load_factor",
+ "_auto_resize",
)
- def __init__(self, quotient: int = 20, hash_function: Optional[SimpleHashT] = None): # needs to be parameterized
+ def __init__(
+ self, quotient: int = 20, auto_expand: bool = True, hash_function: Optional[SimpleHashT] = None
+ ): # needs to be parameterized
if quotient < 3 or quotient > 31:
raise ValueError(
f"Quotient filter: Invalid quotient setting; quotient must be between 3 and 31; {quotient} was provided"
)
- self._q = quotient
- self._r = 32 - quotient
- self._size = 1 << self._q # same as 2**q
- self._elements_added = 0
+ self.__set_params(quotient, auto_expand, hash_function)
+
+ def __set_params(self, quotient: int, auto_expand: bool, hash_function: Optional[SimpleHashT]):
+ self._q: int = quotient
+ self._r: int = 32 - quotient
+ self._size: int = 1 << self._q # same as 2**q
+ self._elements_added: int = 0
+ self._auto_resize: bool = auto_expand
self._hash_func: SimpleHashT = fnv_1a_32 if hash_function is None else hash_function # type: ignore
+ self._max_load_factor: float = 0.85
# ensure we use the smallest type possible to reduce memory wastage
if self._r <= 8:
@@ -89,21 +99,61 @@ class QuotientFilter:
return self._elements_added
@property
- def bits_per_elm(self):
+ def bits_per_elm(self) -> int:
"""int: The number of bits used per element"""
return self._bits_per_elm
+ @property
+ def size(self) -> int:
+ """int: The number of bins available in the filter
+
+ Note:
+ same as `num_elements`"""
+ return self._size
+
+ @property
+ def load_factor(self) -> float:
+ """float: The load factor of the filter"""
+ return self._elements_added / self._size
+
+ @property
+ def auto_expand(self) -> bool:
+ """bool: Will the quotient filter automatically expand"""
+ return self._auto_resize
+
+ @auto_expand.setter
+ def auto_expand(self, val: bool):
+ """change the auto expand property"""
+ self._auto_resize = bool(val)
+
+ @property
+ def max_load_factor(self) -> float:
+ """float: The maximum allowed load factor after which auto expanding should occur"""
+ return self._max_load_factor
+
+ @max_load_factor.setter
+ def max_load_factor(self, val: float):
+ """set the maximum load factor"""
+ self._max_load_factor = float(val)
+
def add(self, key: KeyT) -> None:
"""Add key to the quotient filter
Args:
key (str|bytes): The element to add"""
_hash = self._hash_func(key, 0)
+ self.add_alt(_hash)
+
+ def add_alt(self, _hash: int) -> None:
+ """Add the pre-hashed value to the quotient filter
+
+ Args:
+ _hash (int): The element to add"""
key_quotient = _hash >> self._r
key_remainder = _hash & ((1 << self._r) - 1)
-
- if not self._contains(key_quotient, key_remainder):
- # TODO, add it here
+ if self._contained_at_loc(key_quotient, key_remainder) == -1:
+ if self._auto_resize and self.load_factor >= self._max_load_factor:
+ self.resize()
self._add(key_quotient, key_remainder)
def check(self, key: KeyT) -> bool:
@@ -114,9 +164,92 @@ class QuotientFilter:
Return:
bool: True if likely encountered, False if definately not"""
_hash = self._hash_func(key, 0)
+ return self.check_alt(_hash)
+
+ def check_alt(self, _hash: int) -> bool:
+ """Check to see if the pre-calculated hash is likely in the quotient filter
+
+ Args:
+ _hash (int): The element to add
+ Return:
+ bool: True if likely encountered, False if definately not"""
key_quotient = _hash >> self._r
key_remainder = _hash & ((1 << self._r) - 1)
- return self._contains(key_quotient, key_remainder)
+ return not self._contained_at_loc(key_quotient, key_remainder) == -1
+
+ def iter_hashes(self) -> Iterator[int]:
+ """A generator over the hashes in the quotient filter
+
+ Yields:
+ int: The next hash stored in the quotient filter"""
+ queue: List[int] = []
+
+ # find first empty location
+ start = 0
+ while True:
+ is_occupied = self._is_occupied.check_bit(start)
+ is_continuation = self._is_continuation.check_bit(start)
+ is_shifted = self._is_shifted.check_bit(start)
+ if is_occupied + is_continuation + is_shifted == 0:
+ break
+ start += 1
+
+ cur_quot = 0
+ for i in range(start, self._size + start): # this will allow for wrap-arounds
+ idx = i % self._size
+ is_occupied = self._is_occupied.check_bit(idx)
+ is_continuation = self._is_continuation.check_bit(idx)
+ is_shifted = self._is_shifted.check_bit(idx)
+ # Nothing here, keep going
+ if is_occupied + is_continuation + is_shifted == 0:
+ assert len(queue) == 0
+ continue
+
+ if is_occupied == 1: # keep track of the indicies that match a hashed quotient
+ queue.append(idx)
+
+ # run start
+ if not is_continuation and (is_occupied or is_shifted):
+ cur_quot = queue.pop(0)
+
+ if self._filter[idx] != 0:
+ yield (cur_quot << self._r) + self._filter[idx]
+
+ def get_hashes(self) -> List[int]:
+ """Get the hashes from the quotient filter as a list
+
+ Returns:
+ list(int): The hash values stored in the quotient filter"""
+ return list(self.iter_hashes())
+
+ def resize(self, quotient: Optional[int] = None) -> None:
+ """Resize the quotient filter to use the new quotient size
+
+ Args:
+ int: The new quotient to use
+ Note:
+ If `None` is provided, the quotient filter will double in size (quotient + 1)
+ Raises:
+ ValueError: When the new quotient will not accommodate the elements already added"""
+ if quotient is None:
+ quotient = self._q + 1
+
+ if self.elements_added >= (1 << quotient):
+ raise ValueError("Unable to shrink since there will be too many elements in the quotient filter")
+ if quotient < 3 or quotient > 31:
+ raise ValueError(
+ f"Quotient filter: Invalid quotient setting; quotient must be between 3 and 31; {quotient} was provided"
+ )
+
+ hashes = self.get_hashes()
+
+ for i in range(self._size):
+ self._filter[i] = 0
+
+ self.__set_params(quotient, self._auto_resize, self._hash_func)
+
+ for _h in hashes:
+ self.add_alt(_h)
def _shift_insert(self, k, v, start, j, flag):
if self._is_occupied[j] == 0 and self._is_continuation[j] == 0 and self._is_shifted[j] == 0:
@@ -215,9 +348,10 @@ class QuotientFilter:
self._shift_insert(q, r, orig_start_idx, start_idx, 1)
self._elements_added += 1
- def _contains(self, q: int, r: int) -> bool:
+ def _contained_at_loc(self, q: int, r: int) -> int:
+ """returns the index location of the element, or -1 if not present"""
if self._is_occupied[q] == 0:
- return False
+ return -1
start_idx = self._get_start_index(q)
@@ -236,7 +370,7 @@ class QuotientFilter:
break
if self._filter[start_idx] == r:
- return True
+ return start_idx
start_idx = (start_idx + 1) & (self._size - 1)
meta_bits = (
@@ -245,4 +379,4 @@ class QuotientFilter:
+ self._is_shifted.check_bit(start_idx)
)
- return False
+ return -1
diff --git a/pyproject.toml b/pyproject.toml
index ae50c6a..c697c86 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,6 +15,7 @@ keywords = [
"bloom-filter",
"count-min-sketch",
"cuckoo-filter",
+ "quotient-filter",
]
readme = "README.rst"
classifiers = [
|
barrust/pyprobables
|
84dbffc9a5a27d5daeed37137efc0b2efc0e8ecc
|
diff --git a/tests/quotientfilter_test.py b/tests/quotientfilter_test.py
index 1f0f1a1..292c5ba 100644
--- a/tests/quotientfilter_test.py
+++ b/tests/quotientfilter_test.py
@@ -38,14 +38,16 @@ class TestQuotientFilter(unittest.TestCase):
self.assertEqual(qf.remainder, 24)
self.assertEqual(qf.elements_added, 0)
self.assertEqual(qf.num_elements, 256) # 2**qf.quotient
+ self.assertTrue(qf.auto_expand)
- qf = QuotientFilter(quotient=24)
+ qf = QuotientFilter(quotient=24, auto_expand=False)
self.assertEqual(qf.bits_per_elm, 8)
self.assertEqual(qf.quotient, 24)
self.assertEqual(qf.remainder, 8)
self.assertEqual(qf.elements_added, 0)
self.assertEqual(qf.num_elements, 16777216) # 2**qf.quotient
+ self.assertFalse(qf.auto_expand)
def test_qf_add_check(self):
"test that the qf is able to add and check elements"
@@ -54,7 +56,7 @@ class TestQuotientFilter(unittest.TestCase):
for i in range(0, 200, 2):
qf.add(str(i))
self.assertEqual(qf.elements_added, 100)
-
+ self.assertEqual(qf.load_factor, 100 / qf.size)
found_no = False
for i in range(0, 200, 2):
if not qf.check(str(i)):
@@ -87,6 +89,102 @@ class TestQuotientFilter(unittest.TestCase):
self.assertEqual(qf.elements_added, 100)
- def test_qf_errors(self):
+ def test_qf_init_errors(self):
+ """test quotient filter initialization errors"""
self.assertRaises(ValueError, lambda: QuotientFilter(quotient=2))
self.assertRaises(ValueError, lambda: QuotientFilter(quotient=32))
+
+ def test_retrieve_hashes(self):
+ """test retrieving hashes back from the quotient filter"""
+ qf = QuotientFilter(quotient=8, auto_expand=False)
+ hashes = []
+ for i in range(255):
+ hashes.append(qf._hash_func(str(i), 0)) # use the private function here..
+ qf.add(str(i))
+ self.assertEqual(qf.size, 256)
+ self.assertEqual(qf.load_factor, 255 / qf.size)
+ out_hashes = qf.get_hashes()
+ self.assertEqual(qf.elements_added, len(out_hashes))
+ self.assertEqual(set(hashes), set(out_hashes))
+
+ def test_resize(self):
+ """test resizing the quotient filter"""
+ qf = QuotientFilter(quotient=8, auto_expand=False)
+ for i in range(200):
+ qf.add(str(i))
+
+ self.assertEqual(qf.elements_added, 200)
+ self.assertEqual(qf.load_factor, 200 / qf.size)
+ self.assertEqual(qf.quotient, 8)
+ self.assertEqual(qf.remainder, 24)
+ self.assertEqual(qf.bits_per_elm, 32)
+ self.assertFalse(qf.auto_expand)
+
+ self.assertRaises(ValueError, lambda: qf.resize(7)) # should be too small to fit
+
+ qf.resize(17)
+ self.assertEqual(qf.elements_added, 200)
+ self.assertEqual(qf.load_factor, 200 / qf.size)
+ self.assertEqual(qf.quotient, 17)
+ self.assertEqual(qf.remainder, 15)
+ self.assertEqual(qf.bits_per_elm, 16)
+ # ensure everything is still accessable
+ for i in range(200):
+ self.assertTrue(qf.check(str(i)))
+
+ def test_auto_resize(self):
+ """test resizing the quotient filter automatically"""
+ qf = QuotientFilter(quotient=8, auto_expand=True)
+ self.assertEqual(qf.max_load_factor, 0.85)
+ self.assertEqual(qf.elements_added, 0)
+ self.assertEqual(qf.load_factor, 0 / qf.size)
+ self.assertEqual(qf.quotient, 8)
+ self.assertEqual(qf.remainder, 24)
+ self.assertEqual(qf.bits_per_elm, 32)
+ self.assertTrue(qf.auto_expand)
+
+ for i in range(220):
+ qf.add(str(i))
+
+ self.assertEqual(qf.max_load_factor, 0.85)
+ self.assertEqual(qf.elements_added, 220)
+ self.assertEqual(qf.load_factor, 220 / qf.size)
+ self.assertEqual(qf.quotient, 9)
+ self.assertEqual(qf.remainder, 23)
+ self.assertEqual(qf.bits_per_elm, 32)
+
+ def test_auto_resize_changed_max_load_factor(self):
+ """test resizing the quotient filter with a different load factor"""
+ qf = QuotientFilter(quotient=8, auto_expand=True)
+ self.assertEqual(qf.max_load_factor, 0.85)
+ self.assertTrue(qf.auto_expand)
+ qf.max_load_factor = 0.65
+ self.assertEqual(qf.max_load_factor, 0.65)
+
+ self.assertEqual(qf.elements_added, 0)
+ self.assertEqual(qf.load_factor, 0 / qf.size)
+ self.assertEqual(qf.quotient, 8)
+ self.assertEqual(qf.remainder, 24)
+ self.assertEqual(qf.bits_per_elm, 32)
+ self.assertTrue(qf.auto_expand)
+
+ for i in range(200):
+ qf.add(str(i))
+
+ self.assertEqual(qf.max_load_factor, 0.85)
+ self.assertEqual(qf.elements_added, 200)
+ self.assertEqual(qf.load_factor, 200 / qf.size)
+ self.assertEqual(qf.quotient, 9)
+ self.assertEqual(qf.remainder, 23)
+ self.assertEqual(qf.bits_per_elm, 32)
+
+ def test_resize_errors(self):
+ """test resizing errors"""
+
+ qf = QuotientFilter(quotient=8, auto_expand=True)
+ for i in range(200):
+ qf.add(str(i))
+
+ self.assertRaises(ValueError, lambda: qf.resize(quotient=2))
+ self.assertRaises(ValueError, lambda: qf.resize(quotient=32))
+ self.assertRaises(ValueError, lambda: qf.resize(quotient=6))
|
quotient filter: additional functionality
Additional functionality to add to the quotient filter:
- Resize / Merge
- Delete element
- Import / Export
Something to consider would be to use a form of bit packing to make it more compact, perhaps as a second class
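A minimal, self-contained sketch of the resize idea (illustrative names only, not pyprobables' API): the stored (quotient, remainder) pair is enough to reconstruct the original hash, so every element can be recovered and re-split for a larger table.
```python
# Illustrative sketch: recover the full hash from (quotient, remainder) and
# re-split it for a new quotient size, as a resize operation would need to do.
def split_hash(h, q_bits, total_bits=32):
    r_bits = total_bits - q_bits
    return h >> r_bits, h & ((1 << r_bits) - 1)

def join_hash(q, r, q_bits, total_bits=32):
    return (q << (total_bits - q_bits)) | r

h = 0xDEADBEEF
q, r = split_hash(h, q_bits=8)
assert join_hash(q, r, q_bits=8) == h      # the hash is fully recoverable
q2, r2 = split_hash(h, q_bits=17)          # re-split for a resized filter
assert join_hash(q2, r2, q_bits=17) == h
```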
|
0.0
|
84dbffc9a5a27d5daeed37137efc0b2efc0e8ecc
|
[
"tests/quotientfilter_test.py::TestQuotientFilter::test_auto_resize",
"tests/quotientfilter_test.py::TestQuotientFilter::test_auto_resize_changed_max_load_factor",
"tests/quotientfilter_test.py::TestQuotientFilter::test_qf_add_check",
"tests/quotientfilter_test.py::TestQuotientFilter::test_qf_init",
"tests/quotientfilter_test.py::TestQuotientFilter::test_resize",
"tests/quotientfilter_test.py::TestQuotientFilter::test_resize_errors",
"tests/quotientfilter_test.py::TestQuotientFilter::test_retrieve_hashes"
] |
[
"tests/quotientfilter_test.py::TestQuotientFilter::test_qf_add_check_in",
"tests/quotientfilter_test.py::TestQuotientFilter::test_qf_init_errors"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-13 15:59:00+00:00
|
mit
| 1,294
|
|
asottile__tokenize-rt-3
|
diff --git a/README.md b/README.md
index 3e9e23c..6fdf84d 100644
--- a/README.md
+++ b/README.md
@@ -5,8 +5,9 @@ tokenize-rt
===========
The stdlib `tokenize` module does not properly roundtrip. This wrapper
-around the stdlib provides an additional token `UNIMPORTANT_WS`, and a `Token`
-data type. Use `src_to_tokens` and `tokens_to_src` to roundtrip.
+around the stdlib provides two additional tokens `ESCAPED_NL` and
+`UNIMPORTANT_WS`, and a `Token` data type. Use `src_to_tokens` and
+`tokens_to_src` to roundtrip.
This library is useful if you're writing a refactoring tool based on the
python tokenization.
@@ -21,6 +22,8 @@ python tokenization.
### `tokenize_rt.tokens_to_src(Sequence[Token]) -> text`
+### `tokenize_rt.ECSAPED_NL`
+
### `tokenize_rt.UNIMPORTANT_WS`
### `tokenize_rt.Token(name, src, line=None, utf8_byte_offset=None)`
@@ -28,9 +31,9 @@ python tokenization.
Construct a token
- `name`: one of the token names listed in `token.tok_name` or
- `UNIMPORTANT_WS`
+ `ESCAPED_NL` or `UNIMPORTANT_WS`
- `src`: token's source as text
- `line`: the line number that this token appears on. This will be `None` for
- `UNIMPORTANT_WS` tokens.
+ `ESCAPED_NL` and `UNIMPORTANT_WS` tokens.
- `utf8_byte_offset`: the utf8 byte offset that this token appears on in the
- line. This will be `None` for `UNIMPORTANT_WS` tokens.
+ line. This will be `None` for `ESCAPED_NL` and `UNIMPORTANT_WS` tokens.
diff --git a/tokenize_rt.py b/tokenize_rt.py
index bc5ca7d..4513200 100644
--- a/tokenize_rt.py
+++ b/tokenize_rt.py
@@ -7,6 +7,7 @@ import io
import tokenize
+ESCAPED_NL = 'ESCAPED_NL'
UNIMPORTANT_WS = 'UNIMPORTANT_WS'
Token = collections.namedtuple(
'Token', ('name', 'src', 'line', 'utf8_byte_offset'),
@@ -32,8 +33,16 @@ def src_to_tokens(src):
newtok += lines[lineno]
if scol > 0:
newtok += lines[sline][:scol]
+
+ # a multiline unimportant whitespace may contain escaped newlines
+ while '\\\n' in newtok:
+ ws, nl, newtok = newtok.partition('\\\n')
+ if ws:
+ tokens.append(Token(UNIMPORTANT_WS, ws))
+ tokens.append(Token(ESCAPED_NL, nl))
if newtok:
tokens.append(Token(UNIMPORTANT_WS, newtok))
+
elif scol > last_col:
tokens.append(Token(UNIMPORTANT_WS, line[last_col:scol]))
|
asottile/tokenize-rt
|
8054f9c9edcc217c5224772b1df87beffdbafd53
|
diff --git a/tests/tokenize_rt_test.py b/tests/tokenize_rt_test.py
index d01f0fb..6977e5f 100644
--- a/tests/tokenize_rt_test.py
+++ b/tests/tokenize_rt_test.py
@@ -5,6 +5,7 @@ import io
import pytest
+from tokenize_rt import ESCAPED_NL
from tokenize_rt import main
from tokenize_rt import src_to_tokens
from tokenize_rt import Token
@@ -26,6 +27,41 @@ def test_src_to_tokens_simple():
]
+def test_src_to_tokens_escaped_nl():
+ src = (
+ 'x = \\\n'
+ ' 5'
+ )
+ ret = src_to_tokens(src)
+ assert ret == [
+ Token('NAME', 'x', line=1, utf8_byte_offset=0),
+ Token(UNIMPORTANT_WS, ' ', line=None, utf8_byte_offset=None),
+ Token('OP', '=', line=1, utf8_byte_offset=2),
+ Token(UNIMPORTANT_WS, ' ', line=None, utf8_byte_offset=None),
+ Token(ESCAPED_NL, '\\\n', line=None, utf8_byte_offset=None),
+ Token(UNIMPORTANT_WS, ' ', line=None, utf8_byte_offset=None),
+ Token('NUMBER', '5', line=2, utf8_byte_offset=4),
+ Token('ENDMARKER', '', line=3, utf8_byte_offset=0),
+ ]
+
+
+def test_src_to_tokens_escaped_nl_no_left_ws():
+ src = (
+ 'x =\\\n'
+ ' 5'
+ )
+ ret = src_to_tokens(src)
+ assert ret == [
+ Token('NAME', 'x', line=1, utf8_byte_offset=0),
+ Token(UNIMPORTANT_WS, ' ', line=None, utf8_byte_offset=None),
+ Token('OP', '=', line=1, utf8_byte_offset=2),
+ Token(ESCAPED_NL, '\\\n', line=None, utf8_byte_offset=None),
+ Token(UNIMPORTANT_WS, ' ', line=None, utf8_byte_offset=None),
+ Token('NUMBER', '5', line=2, utf8_byte_offset=4),
+ Token('ENDMARKER', '', line=3, utf8_byte_offset=0),
+ ]
+
+
@pytest.mark.parametrize(
'filename',
(
|
Consider emitting an `ESCAPED_NL` token
For example:
```python
x = y.\
foo(
bar,
)
```
The current tokenization is:
```
1:0 NAME 'x'
?:? UNIMPORTANT_WS ' '
1:2 OP '='
?:? UNIMPORTANT_WS ' '
1:4 NAME 'y'
1:5 OP '.'
?:? UNIMPORTANT_WS '\\\n '
2:4 NAME 'foo'
2:7 OP '('
2:8 NL '\n'
?:? UNIMPORTANT_WS ' '
3:8 NAME 'bar'
3:11 OP ','
3:12 NL '\n'
?:? UNIMPORTANT_WS ' '
4:4 OP ')'
4:5 NEWLINE '\n'
5:0 ENDMARKER ''
```
It would be cool to split the UNIMPORTANT_WS token which contains the escaped newline
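A standalone sketch of the splitting idea (token names here are placeholders, not necessarily the library's final API): a whitespace run is partitioned around each backslash-newline pair so the escaped newline becomes its own token.
```python
# Standalone sketch: partition a whitespace run around the escaped newline so
# it becomes its own token. Token names are illustrative placeholders.
ESCAPED_NL = 'ESCAPED_NL'
UNIMPORTANT_WS = 'UNIMPORTANT_WS'

def split_ws(src):
    tokens = []
    while '\\\n' in src:
        ws, _, src = src.partition('\\\n')
        if ws:
            tokens.append((UNIMPORTANT_WS, ws))
        tokens.append((ESCAPED_NL, '\\\n'))
    if src:
        tokens.append((UNIMPORTANT_WS, src))
    return tokens

print(split_ws('\\\n    '))
# [('ESCAPED_NL', '\\\n'), ('UNIMPORTANT_WS', '    ')]
```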
|
0.0
|
8054f9c9edcc217c5224772b1df87beffdbafd53
|
[
"tests/tokenize_rt_test.py::test_src_to_tokens_simple",
"tests/tokenize_rt_test.py::test_roundtrip_tokenize[testing/resources/empty.py]",
"tests/tokenize_rt_test.py::test_roundtrip_tokenize[testing/resources/unicode_snowman.py]",
"tests/tokenize_rt_test.py::test_roundtrip_tokenize[testing/resources/backslash_continuation.py]",
"tests/tokenize_rt_test.py::test_main"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-07-14 15:18:45+00:00
|
mit
| 1,202
|
|
m0nhawk__grafana_api-39
|
diff --git a/Pipfile b/Pipfile
index f534687..2bc2f58 100644
--- a/Pipfile
+++ b/Pipfile
@@ -8,7 +8,7 @@ name = "grafana_api"
[dev-packages]
codecov = "~=2.0"
coverage = "~=4.5"
-mock = {version = "*", markers = "python_version <= '2.7'"}
+mock = {version = "*",markers = "python_version <= '2.7'"}
pylint = ">=1.9"
requests-mock = "~=1.6"
unittest-xml-reporting = "~=2.5"
diff --git a/grafana_api/grafana_api.py b/grafana_api/grafana_api.py
index 7c81439..e965768 100644
--- a/grafana_api/grafana_api.py
+++ b/grafana_api/grafana_api.py
@@ -94,25 +94,17 @@ class GrafanaAPI:
r = runner(
__url, json=json, headers=headers, auth=self.auth, verify=self.verify
)
- try:
-
- if 500 <= r.status_code < 600:
- raise GrafanaServerError(
- "Client Error {0}: {1}".format(r.status_code, r.json()['message'])
- )
- elif r.status_code == 400:
- raise GrafanaBadInputError("Bad Input: `{0}`".format(r.text))
- elif r.status_code == 401:
- raise GrafanaUnauthorizedError("Unauthorized")
- elif 400 <= r.status_code < 500:
- raise GrafanaClientError(
- "Client Error {0}: {1}".format(r.status_code, r.text)
- )
- return r.json()
-
- except Exception as error:
- print('Caught this error: ' + repr(error))
-
-
-
+ if 500 <= r.status_code < 600:
+ raise GrafanaServerError(
+ "Client Error {0}: {1}".format(r.status_code, r.json()['message'])
+ )
+ elif r.status_code == 400:
+ raise GrafanaBadInputError("Bad Input: `{0}`".format(r.text))
+ elif r.status_code == 401:
+ raise GrafanaUnauthorizedError("Unauthorized")
+ elif 400 <= r.status_code < 500:
+ raise GrafanaClientError(
+ "Client Error {0}: {1}".format(r.status_code, r.text)
+ )
+ return r.json()
return __request_runnner
|
m0nhawk/grafana_api
|
bfbe1dd6a4e90e271b036444942faf1ad6b70784
|
diff --git a/test/api/test_annotations.py b/test/api/test_annotations.py
index 3299748..3f278f7 100644
--- a/test/api/test_annotations.py
+++ b/test/api/test_annotations.py
@@ -99,29 +99,29 @@ class AnnotationsTestCase(unittest.TestCase):
@requests_mock.Mocker()
def test_delete_annotations_by_id_could_not_find(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Could not find annotation to update"},status_code=500)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaServerError)
+ with self.assertRaises(GrafanaServerError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
def test_delete_annotations_by_id_forbidden(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Forbidden"},
status_code=403)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaClientError)
+ with self.assertRaises(GrafanaClientError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
def test_delete_annotations_by_id_unauthorized(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Unauthorized"},
status_code=401)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaUnauthorizedError)
+ with self.assertRaises(GrafanaUnauthorizedError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
def test_delete_annotations_by_id_bad_input(self, m):
m.delete("http://localhost/api/annotations/None", json={"message": "Bad Input"},
status_code=400)
- response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
- self.assertRaises(GrafanaBadInputError)
+ with self.assertRaises(GrafanaBadInputError):
+ response = self.cli.annotations.delete_annotations_by_id(annotations_id=None)
@requests_mock.Mocker()
diff --git a/test/api/test_folder.py b/test/api/test_folder.py
index 66b5f44..319eeaa 100644
--- a/test/api/test_folder.py
+++ b/test/api/test_folder.py
@@ -92,8 +92,8 @@ class FolderTestCase(unittest.TestCase):
"message": "Folder title cannot be empty"
}, status_code=400
)
- folder = self.cli.folder.create_folder(title="Departmenet ABC")
- self.assertRaises(GrafanaBadInputError)
+ with self.assertRaises(GrafanaBadInputError):
+ folder = self.cli.folder.create_folder(title="Departmenet ABC")
@requests_mock.Mocker()
def test_update_folder(self, m):
diff --git a/test/api/test_search.py b/test/api/test_search.py
index 134696b..cffa33c 100644
--- a/test/api/test_search.py
+++ b/test/api/test_search.py
@@ -46,5 +46,5 @@ class AnnotationsTestCase(unittest.TestCase):
}, status_code=400
)
- result = self.cli.search.search_dashboards()
- self.assertRaises(GrafanaBadInputError)
+ with self.assertRaises(GrafanaBadInputError):
+ result = self.cli.search.search_dashboards()
|
grafana_api does not pass errors to caller anymore
**Describe the bug**
```
in grafana_api.py, line 114:
except Exception as error:
print('Caught this error: ' + repr(error))
```
you are catching all of the exceptions you were raising before, with the result that all issues are hidden from the code that actually uses grafana_api. This is pretty much broken.
According to the commit, this is supposed to fix unit tests. If this fixes unit tests, they are also broken...
Expected would be that the errors are being raised.
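A small self-contained illustration of the difference (generic code, not the actual grafana_api client): swallowing the exception turns every failure into an implicit `None` return, while re-raising lets the caller decide.
```python
# Generic illustration, not the grafana_api client itself.
class GrafanaServerError(Exception):
    pass

def swallowing(status_code):
    try:
        if status_code >= 500:
            raise GrafanaServerError(status_code)
        return {"ok": True}
    except Exception as error:
        print('Caught this error: ' + repr(error))  # error is hidden...
        # ...and the function implicitly returns None

def raising(status_code):
    if status_code >= 500:
        raise GrafanaServerError(status_code)  # propagate to the caller
    return {"ok": True}

print(swallowing(500))            # None -- the caller cannot tell what failed
try:
    raising(500)
except GrafanaServerError as err:
    print('caller can react:', err)
```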
|
0.0
|
bfbe1dd6a4e90e271b036444942faf1ad6b70784
|
[
"test/api/test_search.py::AnnotationsTestCase::test_search_dashboards_with_out_filter",
"test/api/test_folder.py::FolderTestCase::test_create_folder_empty_uid",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_forbidden",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_could_not_find",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_bad_input",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id_unauthorized"
] |
[
"test/api/test_search.py::AnnotationsTestCase::test_search_dashboards",
"test/api/test_folder.py::FolderTestCase::test_get_folder_by_id",
"test/api/test_folder.py::FolderTestCase::test_update_folder",
"test/api/test_folder.py::FolderTestCase::test_get_folder_permissions",
"test/api/test_folder.py::FolderTestCase::test_delete_folder",
"test/api/test_folder.py::FolderTestCase::test_update_folder_permissions",
"test/api/test_folder.py::FolderTestCase::test_update_folder_some_param",
"test/api/test_folder.py::FolderTestCase::test_get_folder",
"test/api/test_folder.py::FolderTestCase::test_create_folder",
"test/api/test_folder.py::FolderTestCase::test_get_all_folders",
"test/api/test_annotations.py::AnnotationsTestCase::test_update_annotation",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_id",
"test/api/test_annotations.py::AnnotationsTestCase::test_add_annotation",
"test/api/test_annotations.py::AnnotationsTestCase::test_annotations_with_out_param",
"test/api/test_annotations.py::AnnotationsTestCase::test_delete_annotations_by_region_id",
"test/api/test_annotations.py::AnnotationsTestCase::test_add_annotation_graphite",
"test/api/test_annotations.py::AnnotationsTestCase::test_annotations"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-08-19 17:18:02+00:00
|
mit
| 3,661
|
|
hCaptcha__hmt-basemodels-47
|
diff --git a/basemodels/manifest/data/preprocess.py b/basemodels/manifest/data/preprocess.py
new file mode 100644
index 0000000..8ba5827
--- /dev/null
+++ b/basemodels/manifest/data/preprocess.py
@@ -0,0 +1,12 @@
+from schematics.models import Model
+from schematics.types import StringType, DictType, UnionType, IntType, FloatType
+
+class Preprocess(Model):
+ pipeline = StringType(required=True,choices=["FaceBlurPipeline"])
+ config = DictType(UnionType([FloatType, IntType, StringType]))
+
+ def to_dict(self):
+ p = { "pipeline": self.pipeline }
+ if self.config is not None:
+ p["config"] = self.config
+ return p
\ No newline at end of file
diff --git a/basemodels/manifest/manifest.py b/basemodels/manifest/manifest.py
index ba082d7..0da9ca2 100644
--- a/basemodels/manifest/manifest.py
+++ b/basemodels/manifest/manifest.py
@@ -9,6 +9,7 @@ from schematics.types import StringType, DecimalType, BooleanType, IntType, Dict
from .data.groundtruth import validate_groundtruth_entry
from .data.taskdata import validate_taskdata_entry
+from .data.preprocess import Preprocess
BASE_JOB_TYPES = [
"image_label_binary",
diff --git a/basemodels/pydantic/__init__.py b/basemodels/pydantic/__init__.py
index 66c6f10..fb3f851 100644
--- a/basemodels/pydantic/__init__.py
+++ b/basemodels/pydantic/__init__.py
@@ -1,3 +1,4 @@
from .manifest import validate_manifest_uris, Manifest, NestedManifest, RequestConfig, TaskData, Webhook
from .manifest.data import validate_taskdata_entry, validate_groundtruth_entry
from .via import ViaDataManifest
+from .manifest.data.preprocess import Pipeline, Preprocess
\ No newline at end of file
diff --git a/basemodels/pydantic/manifest/data/preprocess.py b/basemodels/pydantic/manifest/data/preprocess.py
new file mode 100644
index 0000000..32f8667
--- /dev/null
+++ b/basemodels/pydantic/manifest/data/preprocess.py
@@ -0,0 +1,16 @@
+import enum
+import typing
+import pydantic
+
+class Pipeline(str, enum.Enum):
+ FaceBlurPipeline = 'FaceBlurPipeline'
+
+class Preprocess(pydantic.BaseModel):
+ pipeline: Pipeline
+ config: typing.Optional[dict]
+
+ def to_dict(self):
+ p = { "pipeline": self.pipeline.value }
+ if self.config is not None:
+ p["config"] = self.config
+ return p
\ No newline at end of file
diff --git a/pyproject.toml b/pyproject.toml
index 1fbdcc4..8156772 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "hmt-basemodels"
-version = "0.1.1"
+version = "0.1.2"
description = ""
authors = ["Intuition Machines, Inc <[email protected]>"]
packages = [
diff --git a/setup.py b/setup.py
index 4ff40fd..264080f 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@ import setuptools
setuptools.setup(
name="hmt-basemodels",
- version="0.1.1",
+ version="0.1.2",
author="HUMAN Protocol",
description="Common data models shared by various components of the Human Protocol stack",
url="https://github.com/hCaptcha/hmt-basemodels",
|
hCaptcha/hmt-basemodels
|
86c71b032a082fc86b14ff885989592aab015666
|
diff --git a/tests/test_preprocess.py b/tests/test_preprocess.py
new file mode 100644
index 0000000..3f3776b
--- /dev/null
+++ b/tests/test_preprocess.py
@@ -0,0 +1,41 @@
+import unittest
+
+from schematics.exceptions import DataError
+from basemodels.manifest import Preprocess
+
+class PipelineTest(unittest.TestCase):
+ def test_preprocess(self):
+ config = {}
+ p = Preprocess({"pipeline": "FaceBlurPipeline", "config": config})
+
+ self.assertEqual(p.pipeline, "FaceBlurPipeline")
+ self.assertEqual(p.config, config)
+
+ p = Preprocess({"pipeline": "FaceBlurPipeline"})
+
+ self.assertIsNone(p.config)
+
+
+ def test_preprocess_raise(self):
+ with self.assertRaises(DataError):
+ Preprocess().validate()
+
+ with self.assertRaises(DataError):
+ Preprocess({"pipeline": ""}).validate()
+
+ with self.assertRaises(DataError):
+ Preprocess({"pipeline": "FaceBlurPipeline", "config": 1}).validate()
+
+
+ def test_preprocess_to_dict(self):
+ config = { "radius": 3 }
+ p = Preprocess({"pipeline": "FaceBlurPipeline", "config": config})
+
+ self.assertEqual(p.to_dict(), { "pipeline": "FaceBlurPipeline", "config": config })
+
+ p = Preprocess({"pipeline": "FaceBlurPipeline"})
+
+ self.assertEqual(p.to_dict(), { "pipeline": "FaceBlurPipeline" })
+
+
+
diff --git a/tests/test_pydantic_preprocess.py b/tests/test_pydantic_preprocess.py
new file mode 100644
index 0000000..4731b35
--- /dev/null
+++ b/tests/test_pydantic_preprocess.py
@@ -0,0 +1,41 @@
+import unittest
+
+from pydantic.error_wrappers import ValidationError
+from basemodels.pydantic import Preprocess, Pipeline
+
+class PipelineTest(unittest.TestCase):
+ def test_preprocess(self):
+ config = {}
+ p = Preprocess(pipeline=Pipeline.FaceBlurPipeline, config=config)
+
+ self.assertEqual(p.pipeline, Pipeline.FaceBlurPipeline)
+ self.assertEqual(p.config, config)
+
+ p = Preprocess(pipeline=Pipeline.FaceBlurPipeline)
+
+ self.assertIsNone(p.config)
+
+
+ def test_preprocess_raise(self):
+ with self.assertRaises(ValidationError):
+ Preprocess()
+
+ with self.assertRaises(ValidationError):
+ Preprocess(pipeline="")
+
+ with self.assertRaises(ValidationError):
+ Preprocess(pipeline=Pipeline.FaceBlurPipeline, config=1)
+
+
+ def test_preprocess_to_dict(self):
+ config = { "radius": 3 }
+ p = Preprocess(pipeline=Pipeline.FaceBlurPipeline, config=config)
+
+ self.assertEqual(p.to_dict(), { "pipeline": Pipeline.FaceBlurPipeline.value, "config": config })
+
+ p = Preprocess(pipeline=Pipeline.FaceBlurPipeline)
+
+ self.assertEqual(p.to_dict(), { "pipeline": Pipeline.FaceBlurPipeline.value })
+
+
+
|
add preprocessing schema
Add a basic preprocessing schema for configuring image preprocessing in labeling requests.
|
0.0
|
86c71b032a082fc86b14ff885989592aab015666
|
[
"tests/test_preprocess.py::PipelineTest::test_preprocess",
"tests/test_preprocess.py::PipelineTest::test_preprocess_raise",
"tests/test_preprocess.py::PipelineTest::test_preprocess_to_dict",
"tests/test_pydantic_preprocess.py::PipelineTest::test_preprocess",
"tests/test_pydantic_preprocess.py::PipelineTest::test_preprocess_raise",
"tests/test_pydantic_preprocess.py::PipelineTest::test_preprocess_to_dict"
] |
[] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-29 16:27:22+00:00
|
mit
| 2,689
|
|
duosecurity__duo_client_python-86
|
diff --git a/duo_client/admin.py b/duo_client/admin.py
index 48bb1d6..c482019 100644
--- a/duo_client/admin.py
+++ b/duo_client/admin.py
@@ -2001,7 +2001,7 @@ class Admin(client.Client):
return self.json_api_call('GET', url + group_id, {})
- def get_group_users(self, group_id, limit=100, offset=0):
+ def get_group_users(self, group_id, limit=None, offset=0):
"""
Get a paginated list of users associated with the specified
group.
@@ -2010,13 +2010,28 @@ class Admin(client.Client):
limit - The maximum number of records to return. Maximum is 500. (Optional)
offset - The offset of the first record to return. (Optional)
"""
- return self.json_api_call(
+ (limit, offset) = self.normalize_paging_args(limit, offset)
+ if limit:
+ return self.json_api_call(
+ 'GET',
+ '/admin/v2/groups/' + group_id + '/users',
+ {
+ 'limit': limit,
+ 'offset': offset,
+ })
+ return list(self.get_group_users_iterator(group_id))
+
+ def get_group_users_iterator(self, group_id):
+ """
+ Returns an iterator of users associated with the specified group.
+
+ group_id - The id of the group (Required)
+ """
+ return self.json_paging_api_call(
'GET',
'/admin/v2/groups/' + group_id + '/users',
- {
- 'limit': str(limit),
- 'offset': str(offset),
- })
+ {}
+ )
def create_group(self, name,
desc=None,
|
duosecurity/duo_client_python
|
c69c3da2e13f8a7680c06d94f80fadb23eb49ec9
|
diff --git a/tests/admin/test_groups.py b/tests/admin/test_groups.py
index 6bbed7f..7e516e1 100644
--- a/tests/admin/test_groups.py
+++ b/tests/admin/test_groups.py
@@ -119,7 +119,7 @@ class TestGroups(TestAdmin):
def test_get_group_users(self):
""" Test for getting list of users associated with a group
"""
- response = self.client.get_group_users('ABC123')
+ response = self.client_list.get_group_users('ABC123')[0]
uri, args = response['uri'].split('?')
self.assertEqual(response['method'], 'GET')
@@ -132,6 +132,69 @@ class TestGroups(TestAdmin):
'offset': ['0'],
})
+ def test_get_group_users_with_offset(self):
+ """Test to get users by group id with pagination params
+ """
+ response = self.client_list.get_group_users('ABC123', offset=30)[0]
+ uri, args = response['uri'].split('?')
+
+ self.assertEqual(response['method'], 'GET')
+ self.assertEqual(uri, '/admin/v2/groups/ABC123/users')
+ self.assertEqual(util.params_to_dict(args),
+ {
+ 'account_id':[self.client.account_id],
+ 'limit': ['100'],
+ 'offset': ['0'],
+ })
+
+ def test_get_group_users_with_limit(self):
+ """Test to get users by group id with pagination params
+ """
+ response = self.client_list.get_group_users('ABC123', limit=30)[0]
+ uri, args = response['uri'].split('?')
+
+ self.assertEqual(response['method'], 'GET')
+ self.assertEqual(uri, '/admin/v2/groups/ABC123/users')
+ self.assertEqual(util.params_to_dict(args),
+ {
+ 'account_id':[self.client.account_id],
+ 'limit': ['30'],
+ 'offset': ['0'],
+ })
+
+ def test_get_group_users_with_limit_and_offset(self):
+ """Test to get users by group id with pagination params
+ """
+ response = self.client_list.get_group_users(
+ 'ABC123', limit=30, offset=60)[0]
+ uri, args = response['uri'].split('?')
+
+ self.assertEqual(response['method'], 'GET')
+ self.assertEqual(uri, '/admin/v2/groups/ABC123/users')
+ self.assertEqual(util.params_to_dict(args),
+ {
+ 'account_id':[self.client.account_id],
+ 'limit': ['30'],
+ 'offset': ['60'],
+ })
+
+ def test_get_group_users_iterator(self):
+ """Test to get user iterator by group id
+ """
+ iterator = self.client_list.get_group_users_iterator(
+ 'ABC123')
+ response = next(iterator)
+ uri, args = response['uri'].split('?')
+
+ self.assertEqual(response['method'], 'GET')
+ self.assertEqual(uri, '/admin/v2/groups/ABC123/users')
+ self.assertEqual(util.params_to_dict(args),
+ {
+ 'account_id':[self.client.account_id],
+ 'limit': ['100'],
+ 'offset': ['0'],
+ })
+
def test_delete_group(self):
""" Test for deleting a group
"""
|
group pagination not part of the code
Hi,
In `duo_client/admin.py`, there are `get_user_groups` and `get_user_groups_iterator` which (via `json_paging_api_call` in `duo_client/client.py`) seem to be doing the pagination for us (kinda already touched on in #63).
Minor note: per https://duo.com/docs/adminapi#retrieve-groups-by-user-id, `get_user_groups` should have limits of 100/500, but the API shows None. I don't particularly care here - you're doing the paging for me, so, "cool!"
What I'm opening an issue over is, `get_group_users` looks the same as `get_user_groups` from the docs https://duo.com/docs/adminapi#v2-groups-get-users. ("Same" as in, both API calls take optional paging-related parameters, with limits of 100/500 defined; obviously they return different data.) But there's no similar iterator/listmaker offered for `get_group_users` - you get back one limit-sized slurp of data. I'm not seeing the metadata get to me from a call to `get_group_users` in order to manage my own pagination, but that could be an error on my part.
If it's an oversight that `get_group_users` doesn't page, well, here's a request. If it's deliberate, is there something you can maybe drop in a comment for why this call is seemingly unique/different?
Thanks!
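For reference, a generic limit/offset paging loop of the kind such iterators implement. `fetch_page` below is a hypothetical callable, not a duo_client method, and the stop condition (a short page means the last page) is an assumption.
```python
# Hypothetical pagination helper; fetch_page stands in for an API call that
# accepts limit/offset and returns a list of records.
def iter_pages(fetch_page, limit=100):
    offset = 0
    while True:
        page = fetch_page(limit=limit, offset=offset)
        yield from page
        if len(page) < limit:   # assumption: a short page means we're done
            break
        offset += limit

# In-memory stand-in for the API:
records = list(range(250))
fetch = lambda limit, offset: records[offset:offset + limit]
assert list(iter_pages(fetch)) == records
```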
|
0.0
|
c69c3da2e13f8a7680c06d94f80fadb23eb49ec9
|
[
"tests/admin/test_groups.py::TestGroups::test_get_group_users_iterator",
"tests/admin/test_groups.py::TestGroups::test_get_group_users_with_offset"
] |
[
"tests/admin/test_groups.py::TestGroups::test_delete_group",
"tests/admin/test_groups.py::TestGroups::test_get_group_users",
"tests/admin/test_groups.py::TestGroups::test_get_group_users_with_limit",
"tests/admin/test_groups.py::TestGroups::test_get_group_users_with_limit_and_offset",
"tests/admin/test_groups.py::TestGroups::test_get_group_v1",
"tests/admin/test_groups.py::TestGroups::test_get_group_v2",
"tests/admin/test_groups.py::TestGroups::test_get_groups",
"tests/admin/test_groups.py::TestGroups::test_get_groups_generator",
"tests/admin/test_groups.py::TestGroups::test_get_groups_limit",
"tests/admin/test_groups.py::TestGroups::test_get_groups_limit_offset",
"tests/admin/test_groups.py::TestGroups::test_get_groups_offset",
"tests/admin/test_groups.py::TestGroups::test_modify_group"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_issue_reference"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-25 18:34:20+00:00
|
bsd-3-clause
| 2,029
|
|
Kinto__kinto-1943
|
diff --git a/docs/commandline.rst b/docs/commandline.rst
index e56d2080..7c1addef 100644
--- a/docs/commandline.rst
+++ b/docs/commandline.rst
@@ -139,3 +139,19 @@ For example:
::
kinto rebuild-quotas --ini=config/postgresql.ini
+
+Flush Cache
+-----------
+
+Clears the Backend Cache.This can be useful for
+debugging.
+
+::
+
+ kinto flush-cache [--ini INI_FILE]
+
+For example:
+
+::
+
+ kinto flush-cache --ini kinto.ini
\ No newline at end of file
diff --git a/kinto/__main__.py b/kinto/__main__.py
index 4b935afe..dc2142b9 100644
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -24,12 +24,13 @@ def main(args=None):
if args is None:
args = sys.argv[1:]
- parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface")
+ parser = argparse.ArgumentParser(description="Kinto Command-Line Interface")
commands = (
"init",
"start",
"migrate",
"delete-collection",
+ "flush-cache",
"version",
"rebuild-quotas",
"create-user",
@@ -94,26 +95,34 @@ def main(args=None):
required=False,
default="127.0.0.1",
)
+
elif command == "migrate":
subparser.add_argument(
"--dry-run",
action="store_true",
- help="Simulate the migration operations " "and show information",
+ help="Simulate the migration operations and show information",
dest="dry_run",
required=False,
default=False,
)
+
elif command == "delete-collection":
subparser.add_argument(
- "--bucket", help="The bucket where the collection " "belongs to.", required=True
+ "--bucket",
+ help="The bucket where the collection belongs to.",
+ required=True
+ )
+ subparser.add_argument(
+ "--collection",
+ help="The collection to remove.",
+ required=True
)
- subparser.add_argument("--collection", help="The collection to remove.", required=True)
elif command == "rebuild-quotas":
subparser.add_argument(
"--dry-run",
action="store_true",
- help="Simulate the rebuild operation " "and show information",
+ help="Simulate the rebuild operation and show information",
dest="dry_run",
required=False,
default=False,
@@ -225,6 +234,10 @@ def main(args=None):
env, parsed_args["bucket"], parsed_args["collection"]
)
+ elif which_command == "flush-cache":
+ env = bootstrap(config_file, options={"command": "flush-cache"})
+ core_scripts.flush_cache(env)
+
elif which_command == "rebuild-quotas":
dry_run = parsed_args["dry_run"]
env = bootstrap(config_file, options={"command": "rebuild-quotas"})
diff --git a/kinto/core/scripts.py b/kinto/core/scripts.py
index 83a4a5ff..5c89fb06 100644
--- a/kinto/core/scripts.py
+++ b/kinto/core/scripts.py
@@ -25,3 +25,10 @@ def migrate(env, dry_run=False):
logger.error(message)
else:
getattr(registry, backend).initialize_schema(dry_run=dry_run)
+
+
+def flush_cache(env):
+ registry = env["registry"]
+ registry.cache.flush()
+ logger.info(f"Cache has been cleared.")
+ return 0
|
Kinto/kinto
|
173d8eabe81a19c709e22341f64a6f65d6fcff2c
|
diff --git a/tests/core/test_scripts.py b/tests/core/test_scripts.py
index 4a00c8d2..6aa241c5 100644
--- a/tests/core/test_scripts.py
+++ b/tests/core/test_scripts.py
@@ -40,3 +40,8 @@ class InitSchemaTest(unittest.TestCase):
reg.storage.initialize_schema.assert_called_with(dry_run=True)
reg.cache.initialize_schema.assert_called_with(dry_run=True)
reg.permission.initialize_schema.assert_called_with(dry_run=True)
+
+ def test_flush_cache_clear_the_cache_backend(self):
+ scripts.flush_cache({"registry": self.registry})
+ reg = self.registry
+ reg.cache.flush.assert_called_with()
diff --git a/tests/test_main.py b/tests/test_main.py
index 2dfe4a80..73acce40 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -362,3 +362,22 @@ class TestMain(unittest.TestCase):
mocked_logging.basicConfig.assert_called_with(
level=logging.INFO, format=DEFAULT_LOG_FORMAT
)
+
+ def test_cli_flush_cache_command_runs_flush_cache_script(self):
+ # Build a temporary ini file.
+ res = main(
+ [
+ "init",
+ "--ini",
+ TEMP_KINTO_INI,
+ "--backend",
+ "memory",
+ "--cache-backend",
+ "memory",
+ ]
+ )
+ assert res == 0
+ with mock.patch("kinto.__main__.core_scripts.flush_cache") as mocked_cache_script:
+ res = main(["flush-cache", "--ini", TEMP_KINTO_INI])
+ assert res == 0
+ assert mocked_cache_script.call_count == 1
|
Add a clear cache command to kinto
Mainly for debugging purposes, we want to be able to clear the cache from time to time.
|
0.0
|
173d8eabe81a19c709e22341f64a6f65d6fcff2c
|
[
"tests/core/test_scripts.py::InitSchemaTest::test_flush_cache_clear_the_cache_backend",
"tests/test_main.py::TestMain::test_cli_flush_cache_command_runs_flush_cache_script"
] |
[
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_calls_initialize_schema_on_backends",
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_in_dry_run_mode",
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_in_read_only_display_an_error",
"tests/core/test_scripts.py::InitSchemaTest::test_migrate_skips_missing_backends",
"tests/test_main.py::TestMain::test_cli_can_configure_logger_in_debug",
"tests/test_main.py::TestMain::test_cli_can_configure_logger_in_quiet",
"tests/test_main.py::TestMain::test_cli_can_display_kinto_version",
"tests/test_main.py::TestMain::test_cli_create_user_runs_account_script",
"tests/test_main.py::TestMain::test_cli_delete_collection_run_delete_collection_script",
"tests/test_main.py::TestMain::test_cli_init_asks_for_backend_if_not_specified",
"tests/test_main.py::TestMain::test_cli_init_asks_until_backend_is_valid",
"tests/test_main.py::TestMain::test_cli_init_asks_until_cache_backend_is_valid",
"tests/test_main.py::TestMain::test_cli_init_generates_configuration",
"tests/test_main.py::TestMain::test_cli_init_installs_memcached_dependencies_if_needed",
"tests/test_main.py::TestMain::test_cli_init_installs_postgresql_dependencies_if_needed",
"tests/test_main.py::TestMain::test_cli_init_installs_redis_dependencies_if_needed",
"tests/test_main.py::TestMain::test_cli_init_returns_if_file_exists",
"tests/test_main.py::TestMain::test_cli_migrate_command_runs_init_schema",
"tests/test_main.py::TestMain::test_cli_rebuild_quotas_run_rebuild_quotas_script",
"tests/test_main.py::TestMain::test_cli_start_runs_pserve",
"tests/test_main.py::TestMain::test_cli_start_with_quiet_option_runs_pserve_with_quiet",
"tests/test_main.py::TestMain::test_cli_start_with_reload_runs_pserve_with_reload",
"tests/test_main.py::TestMain::test_cli_start_with_verbose_option_runs_pserve_with_verbose",
"tests/test_main.py::TestMain::test_cli_use_default_logging_logger",
"tests/test_main.py::TestMain::test_fails_if_not_enough_args",
"tests/test_main.py::TestMain::test_main_takes_sys_argv_by_default"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-12-20 11:11:50+00:00
|
apache-2.0
| 311
|
|
mixkorshun__django-antispam-6
|
diff --git a/antispam/akismet/entities.py b/antispam/akismet/entities.py
index 91d766a..a620516 100644
--- a/antispam/akismet/entities.py
+++ b/antispam/akismet/entities.py
@@ -1,6 +1,6 @@
from datetime import datetime
-from .utils import get_client_ip, get_timestamp
+from .utils import get_client_ip
class Request:
@@ -152,7 +152,7 @@ class Comment:
params = {
'comment_type': self.type,
'comment_content': self.content,
- 'comment_date': get_timestamp(self.created),
+ 'comment_date': self.created,
'permalink': self.permalink,
}
diff --git a/antispam/akismet/utils.py b/antispam/akismet/utils.py
index b6cdf72..c292c1a 100644
--- a/antispam/akismet/utils.py
+++ b/antispam/akismet/utils.py
@@ -18,11 +18,3 @@ def get_client_ip(request):
return x_forwarded_for.split(',')[0]
return request.META.get('REMOTE_ADDR')
-
-
-def get_timestamp(dt):
- try:
- return int(dt.timestamp())
- except AttributeError:
- import time
- return int(time.mktime(dt.timetuple()))
|
mixkorshun/django-antispam
|
af42b24ee4dd5d9607e5f8ac0cffb434f9a63e0a
|
diff --git a/tests/akismet/test_entities.py b/tests/akismet/test_entities.py
index 2f92116..c1e536d 100644
--- a/tests/akismet/test_entities.py
+++ b/tests/akismet/test_entities.py
@@ -70,7 +70,7 @@ class CommentTests(TestCase):
self.assertEqual({
'comment_content': '<my comment>',
- 'comment_date': int(time.mktime(comment.created.timetuple())),
+ 'comment_date': comment.created,
'comment_type': 'comment',
'permalink': 'http://mike.example.com/comment-1/',
}, comment.as_params())
|
Akismet: AssertionError on comment_date type
python-akismet expects comment_date to be `datetime`, not a timestamp:
https://github.com/Nekmo/python-akismet/blob/322cf3e9a9a1986434a20e01cabb6767e91a226b/akismet/__init__.py#L49
django-antispam provides a timestamp:
https://github.com/mixkorshun/django-antispam/blob/3b193a8360ff8171f0c6d7cc1891fab0b69d0e0a/antispam/akismet/entities.py#L155
So I get an `AssertionError` when calling `akismet.check`
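A quick stand-alone illustration of the type mismatch (plain Python, no akismet import needed):
```python
from datetime import datetime, timezone

created = datetime(2018, 3, 22, 13, 2, tzinfo=timezone.utc)
as_timestamp = int(created.timestamp())        # what was being sent before the fix

assert isinstance(created, datetime)           # the type python-akismet expects
assert not isinstance(as_timestamp, datetime)  # so the timestamp fails that check
```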
|
0.0
|
af42b24ee4dd5d9607e5f8ac0cffb434f9a63e0a
|
[
"tests/akismet/test_entities.py::CommentTests::test_to_params"
] |
[
"tests/akismet/test_entities.py::AuthorTests::test_to_params",
"tests/akismet/test_entities.py::AuthorTests::test_from_django_user",
"tests/akismet/test_entities.py::SiteTests::test_to_params",
"tests/akismet/test_entities.py::RequestTests::test_to_params",
"tests/akismet/test_entities.py::RequestTests::test_from_django_request",
"tests/akismet/test_entities.py::CommentTests::test_to_params_related_resources"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-03-22 13:02:25+00:00
|
mit
| 3,986
|
|
albertyw__git-reviewers-30
|
diff --git a/git_reviewers/reviewers.py b/git_reviewers/reviewers.py
index e4eee15..77ec78e 100755
--- a/git_reviewers/reviewers.py
+++ b/git_reviewers/reviewers.py
@@ -101,6 +101,18 @@ class FindLogReviewers(FindFileLogReviewers):
""" Find the changed files between current status and master """
git_diff_files_command = ['git', 'diff', 'master', '--name-only']
git_diff_files = self.run_command(git_diff_files_command)
+ if not git_diff_files:
+ return FindHistoricalReviewers().get_changed_files()
+ return git_diff_files
+
+
+class FindHistoricalReviewers(FindFileLogReviewers):
+ def get_changed_files(self) -> List[str]:
+ """Find all git files """
+ git_diff_files_command = [
+ 'git', 'ls-tree', '-r', 'master', '--name-only'
+ ]
+ git_diff_files = self.run_command(git_diff_files_command)
return git_diff_files
|
albertyw/git-reviewers
|
c3a916e554a058e3e3e3527f5692d0efed97ed83
|
diff --git a/git_reviewers/tests/test.py b/git_reviewers/tests/test.py
index 87e2df4..933ebd6 100644
--- a/git_reviewers/tests/test.py
+++ b/git_reviewers/tests/test.py
@@ -110,6 +110,27 @@ class TestLogReviewers(unittest.TestCase):
files = self.finder.get_changed_files()
self.assertEqual(files, ['README.rst', 'setup.py'])
+ @patch('git_reviewers.reviewers.FindHistoricalReviewers')
+ @patch('subprocess.run')
+ def test_no_diffs(self, mock_run, mock_historical):
+ process = MagicMock()
+ process.stdout = b''
+ mock_run.return_value = process
+ mock_historical().get_changed_files.return_value = ['asdf']
+ files = self.finder.get_changed_files()
+ self.assertEqual(files, ['asdf'])
+
+
+class TestHistoricalReviewers(unittest.TestCase):
+ def setUp(self):
+ self.finder = reviewers.FindHistoricalReviewers()
+
+ def test_get_changed_files(self):
+ changed_files = ['README.rst', 'setup.py']
+ self.finder.run_command = MagicMock(return_value=changed_files)
+ files = self.finder.get_changed_files()
+ self.assertEqual(files, ['README.rst', 'setup.py'])
+
class TestFindArcCommitReviewers(unittest.TestCase):
def setUp(self):
|
Make git-reviewers work with commits that only add new files
Read reviewers from the entire repository history.
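A rough sketch of the fallback, using the same two git commands as the patch but plain `subprocess` rather than the tool's own classes:
```python
import subprocess

def relevant_files():
    """Changed files vs. master, falling back to every tracked file."""
    def run(cmd):
        return subprocess.run(cmd, capture_output=True, text=True).stdout.split()

    changed = run(['git', 'diff', 'master', '--name-only'])
    return changed or run(['git', 'ls-tree', '-r', 'master', '--name-only'])
```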
|
0.0
|
c3a916e554a058e3e3e3527f5692d0efed97ed83
|
[
"git_reviewers/tests/test.py::TestLogReviewers::test_no_diffs",
"git_reviewers/tests/test.py::TestHistoricalReviewers::test_get_changed_files"
] |
[
"git_reviewers/tests/test.py::TestFindReviewers::test_check_phabricator_activated",
"git_reviewers/tests/test.py::TestFindReviewers::test_check_phabricator_activated_none",
"git_reviewers/tests/test.py::TestFindReviewers::test_extract_uber_username_from_email",
"git_reviewers/tests/test.py::TestFindReviewers::test_extract_username_from_generic_email",
"git_reviewers/tests/test.py::TestFindReviewers::test_get_reviewers",
"git_reviewers/tests/test.py::TestFindReviewers::test_run_command",
"git_reviewers/tests/test.py::TestFindReviewers::test_run_command_empty_response",
"git_reviewers/tests/test.py::TestFindLogReviewers::test_get_changed_files",
"git_reviewers/tests/test.py::TestFindLogReviewers::test_gets_generic_emails",
"git_reviewers/tests/test.py::TestFindLogReviewers::test_gets_reviewers",
"git_reviewers/tests/test.py::TestFindLogReviewers::test_gets_uber_emails",
"git_reviewers/tests/test.py::TestFindLogReviewers::test_gets_user_weight",
"git_reviewers/tests/test.py::TestLogReviewers::test_get_changed_files",
"git_reviewers/tests/test.py::TestFindArcCommitReviewers::test_multiple_reviews",
"git_reviewers/tests/test.py::TestFindArcCommitReviewers::test_no_reviewers",
"git_reviewers/tests/test.py::TestFindArcCommitReviewers::test_reviewers",
"git_reviewers/tests/test.py::TestShowReviewers::test_copy_reviewers",
"git_reviewers/tests/test.py::TestShowReviewers::test_copy_reviewers_no_pbcopy",
"git_reviewers/tests/test.py::TestShowReviewers::test_show_reviewers",
"git_reviewers/tests/test.py::TestGetReviewers::test_verbose_reviewers",
"git_reviewers/tests/test.py::TestMain::test_ignore_reviewers",
"git_reviewers/tests/test.py::TestMain::test_main",
"git_reviewers/tests/test.py::TestMain::test_phabricator_disabled_reviewers",
"git_reviewers/tests/test.py::TestMain::test_version"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-15 03:42:39+00:00
|
mit
| 1,007
|
|
tehmaze__ansi-34
|
diff --git a/ansi/colour/rgb.py b/ansi/colour/rgb.py
index 13fbc02..6eb1e29 100644
--- a/ansi/colour/rgb.py
+++ b/ansi/colour/rgb.py
@@ -56,7 +56,7 @@ def rgb16(r: int, g: int, b: int) -> str:
return rgb_reduce(r, g, b, 16)
-def rgb256(r: int, g: int, b: int) -> str:
+def rgb256(r: int, g: int, b: int, bg: bool=False) -> str:
"""
Convert an RGB colour to 256 colour ANSI graphics.
@@ -79,4 +79,4 @@ def rgb256(r: int, g: int, b: int) -> str:
colour = sum([16] + [int(6 * float(val) / 256) * mod
for val, mod in ((r, 36), (g, 6), (b, 1))])
- return sequence('m', fields=3)(38, 5, colour)
+ return sequence('m', fields=3)(38 if not bg else 48, 5, colour)
|
tehmaze/ansi
|
f80c14bcee8a9c4b4aecbd88c24ba4818c64db77
|
diff --git a/test_ansi.py b/test_ansi.py
index a12d704..a15a75e 100644
--- a/test_ansi.py
+++ b/test_ansi.py
@@ -40,6 +40,11 @@ def test_rgb() -> None:
msg = (rgb256(0xff, 0x80, 0x00), 'hello world', reset)
assert ''.join(map(str, msg)) == '\x1b[38;5;214mhello world\x1b[0m'
+def test_rgb_bg() -> None:
+ from ansi.colour.rgb import rgb256
+ from ansi.colour.fx import reset
+ msg = (rgb256(0xff, 0x80, 0x00, bg=True), 'hello world', reset)
+ assert ''.join(map(str, msg)) == '\x1b[48;5;214mhello world\x1b[0m'
def test_osc() -> None:
from ansi import osc
|
RGB background color support
hi!
By adding a switch to `rgb256(r, g, b, bg=True)` you could return:
```
if bg:
return sequence('m', fields=3)(48, 5, colour)
else:
return sequence('m', fields=3)(38, 5, colour)
```
|
0.0
|
f80c14bcee8a9c4b4aecbd88c24ba4818c64db77
|
[
"test_ansi.py::test_rgb_bg"
] |
[
"test_ansi.py::test_import",
"test_ansi.py::test_import_color",
"test_ansi.py::test_fg_bg",
"test_ansi.py::test_sugar",
"test_ansi.py::test_rgb",
"test_ansi.py::test_osc",
"test_ansi.py::test_iterm",
"test_ansi.py::test_add",
"test_ansi.py::test_add_to_string",
"test_ansi.py::test_add_other",
"test_ansi.py::test_empty",
"test_ansi.py::test_erase"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-01-22 00:42:39+00:00
|
mit
| 5,843
|
|
pcubillos__bibmanager-90
|
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 44ed2ef..a3ea45c 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -2372,3 +2372,26 @@ Propagated typo corrections into the docs.
*****
Updated link of license badge to point to RTD docs.
+
+
+***** Sat Jun 12 19:01:21 CEST 2021 *****
+
+In bm.remove_dubplicates() added additional check for DOI after
+identical ISBN values in case the entries refer to different chapters
+of a same book.
+Added respective tests.
+
+*****
+
+Small tweak to docs (put latest tutorial video first).
+
+*****
+
+Bumped bibmanager to version 1.3.4
+
+*****
+
+Added mock_init to test_duplicate_isbn_same_unknown_doi() tests
+because the code needs the config file initialized to set the screen
+output style.
+
diff --git a/bibmanager/VERSION.py b/bibmanager/VERSION.py
index 1d5038e..c3c2aca 100644
--- a/bibmanager/VERSION.py
+++ b/bibmanager/VERSION.py
@@ -2,4 +2,4 @@
# bibmanager is open-source software under the MIT license (see LICENSE).
# bibmanager Version:
-__version__ = "1.3.3"
+__version__ = "1.3.4"
diff --git a/bibmanager/bib_manager/bib_manager.py b/bibmanager/bib_manager/bib_manager.py
index 20aa2fe..a8a00a3 100644
--- a/bibmanager/bib_manager/bib_manager.py
+++ b/bibmanager/bib_manager/bib_manager.py
@@ -461,6 +461,22 @@ def remove_duplicates(bibs, field):
if nbibs == 1:
continue
+ # If field is isbn, check doi to differentiate chapters from same book:
+ if field == 'isbn':
+ dois = [
+ bibs[idx].doi if bibs[idx].doi is not None else ""
+ for idx in indices]
+ u_doi, doi_inv, doi_counts = np.unique(
+ dois, return_inverse=True, return_counts=True)
+ doi_multis = np.where((doi_counts > 1) & (ubib != ""))[0]
+ single_dois = u_doi[doi_counts==1]
+ indices = [
+ idx for idx,doi in zip(indices,dois)
+ if doi not in single_dois]
+ nbibs = len(indices)
+ if nbibs <= 1:
+ continue
+
# Query the user:
labels = [idx + " ENTRY:\n" for idx in u.ordinal(np.arange(nbibs)+1)]
display_bibs(labels, [bibs[i] for i in indices])
diff --git a/docs/index.rst b/docs/index.rst
index 3a49ea1..63494ca 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -50,13 +50,13 @@ Check out this video tutorial to get started with ``bibmanager``:
.. raw:: html
- <iframe width="720" height="405" src="https://www.youtube.com/embed/WVmhdwVNXOE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+ <iframe width="720" height="405" src="https://www.youtube.com/embed/qewdBx0M8VE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
-And the new features for version 1.3+:
+And this one covering some other features:
.. raw:: html
- <iframe width="720" height="405" src="https://www.youtube.com/embed/qewdBx0M8VE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
+ <iframe width="720" height="405" src="https://www.youtube.com/embed/WVmhdwVNXOE" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
.. _team:
|
pcubillos/bibmanager
|
0b465d2b83a889c09e1a668744f551ec01156882
|
diff --git a/tests/conftest.py b/tests/conftest.py
index 501e67a..e6c810b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -233,20 +233,58 @@ archivePrefix = "arXiv",
publisher={AIP Publishing}
}"""
+ isbn_doi1 = """
+@INBOOK{2018haex.bookE.116P,
+ author = {{Parmentier}, Vivien and {Crossfield}, Ian J.~M.},
+ title = "{Exoplanet Phase Curves: Observations and Theory}",
+ year = 2018,
+ doi = {10.1007/978-3-319-55333-7\_116},
+ isbn = "978-3-319-55333-7",
+}"""
+
+ isbn_doi2 = """
+@INBOOK{2018haex.bookE.147C,
+ author = {{Cowan}, Nicolas B. and {Fujii}, Yuka},
+ title = "{Mapping Exoplanets}",
+ year = 2018,
+ doi = {10.1007/978-3-319-55333-7\_147},
+ isbn = "978-3-319-55333-7",
+}"""
+
+ isbn_no_doi1 = """
+@INBOOK{2018haex.bookE.116P,
+ author = {{Parmentier}, Vivien and {Crossfield}, Ian J.~M.},
+ title = "{Exoplanet Phase Curves: Observations and Theory}",
+ year = 2018,
+ isbn = "978-3-319-55333-7",
+}"""
+
+ isbn_no_doi2 = """
+@INBOOK{2018haex.bookE.147C,
+ author = {{Cowan}, Nicolas B. and {Fujii}, Yuka},
+ title = "{Mapping Exoplanets}",
+ year = 2018,
+ isbn = "978-3-319-55333-7",
+}"""
+
data = {
- 'jones_minimal': jones_minimal,
- 'jones_no_year': jones_no_year,
- 'jones_no_title': jones_no_title,
- 'jones_no_author': jones_no_author,
- 'jones_braces': jones_braces,
- 'beaulieu_apj': beaulieu_apj,
- 'beaulieu_arxiv': beaulieu_arxiv,
+ 'jones_minimal': jones_minimal,
+ 'jones_no_year': jones_no_year,
+ 'jones_no_title': jones_no_title,
+ 'jones_no_author': jones_no_author,
+ 'jones_braces': jones_braces,
+ 'beaulieu_apj': beaulieu_apj,
+ 'beaulieu_arxiv': beaulieu_arxiv,
'beaulieu_arxiv_dup': beaulieu_arxiv_dup,
- 'hunter': hunter,
- 'oliphant_dup': oliphant_dup,
- 'no_oliphant': no_oliphant,
- 'sing': sing,
- 'stodden': stodden,
+ 'hunter': hunter,
+ 'oliphant_dup': oliphant_dup,
+ 'no_oliphant': no_oliphant,
+ 'sing': sing,
+ 'stodden': stodden,
+ 'isbn_doi1': isbn_doi1,
+ 'isbn_doi2': isbn_doi2,
+ 'isbn_no_doi1': isbn_no_doi1,
+ 'isbn_no_doi2': isbn_no_doi2,
}
return data
diff --git a/tests/test_bib_manager.py b/tests/test_bib_manager.py
index 83edf8d..10922b9 100644
--- a/tests/test_bib_manager.py
+++ b/tests/test_bib_manager.py
@@ -801,6 +801,29 @@ def test_merge_duplicate_title_add(bibs, mock_init_sample, mock_input):
assert bibs['no_oliphant'] in loaded_bibs
+def test_duplicate_isbn_different_doi(capfd, entries):
+ text = entries['isbn_doi1'] + entries['isbn_doi2']
+ bibs = bm.read_file(text=text)
+ assert len(bibs) == 2
+ captured = capfd.readouterr()
+ assert captured.out == ''
+
+
+def test_duplicate_isbn_doi_vs_no_doi(capfd, entries):
+ text = entries['isbn_doi1'] + entries['isbn_no_doi2']
+ bibs = bm.read_file(text=text)
+ assert len(bibs) == 2
+ captured = capfd.readouterr()
+ assert captured.out == ''
+
+
[email protected]('mock_input', [['']], indirect=True)
+def test_duplicate_isbn_same_unknown_doi(mock_init, mock_input, entries):
+ text = entries['isbn_no_doi1'] + entries['isbn_no_doi2']
+ bibs = bm.read_file(text=text)
+ assert len(bibs) == 1
+
+
def test_init_from_scratch(mock_home):
shutil.rmtree(u.HOME, ignore_errors=True)
bm.init(bibfile=None)
|
ISBN duplicates problem
An edited book has many contributions. Each contribution has its own DOI, but shares its parent book's ISBN. Contributions and the parent reference are currently regarded as duplicates in bibmanager, so merging always triggers a prompt. I have tried disabling the export of the contributions' ISBN to the bibfile, and this removed the annoyance of being asked, but the workaround loses the ISBN information from the bibfile. Is there a better way to resolve this problem?
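A self-contained sketch of the idea behind the eventual fix (illustrative dict schema, not bibmanager's Bib objects): a shared ISBN only counts as a duplicate when the DOIs also collide.
```python
def suspected_duplicates(entries):
    """Group entries that share both ISBN and DOI (illustrative schema)."""
    groups = {}
    for entry in entries:
        key = (entry.get('isbn'), entry.get('doi'))
        groups.setdefault(key, []).append(entry)
    return [group for group in groups.values() if len(group) > 1]

chapters = [
    {'key': '2018haex.bookE.116P', 'isbn': '978-3-319-55333-7',
     'doi': '10.1007/978-3-319-55333-7_116'},
    {'key': '2018haex.bookE.147C', 'isbn': '978-3-319-55333-7',
     'doi': '10.1007/978-3-319-55333-7_147'},
]
print(suspected_duplicates(chapters))   # [] -> different chapters, no prompt
```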
|
0.0
|
0b465d2b83a889c09e1a668744f551ec01156882
|
[
"tests/test_bib_manager.py::test_duplicate_isbn_different_doi",
"tests/test_bib_manager.py::test_duplicate_isbn_doi_vs_no_doi"
] |
[
"tests/test_bib_manager.py::test_Bib_minimal",
"tests/test_bib_manager.py::test_Bib_ads_entry",
"tests/test_bib_manager.py::test_Bib_update_content",
"tests/test_bib_manager.py::test_Bib_mismatched_braces_raise",
"tests/test_bib_manager.py::test_Bib_update_key",
"tests/test_bib_manager.py::test_Bib_contains",
"tests/test_bib_manager.py::test_Bib_published_peer_reviewed",
"tests/test_bib_manager.py::test_Bib_published_arxiv",
"tests/test_bib_manager.py::test_Bib_published_non_ads",
"tests/test_bib_manager.py::test_Bib_month[-13]",
"tests/test_bib_manager.py::test_Bib_month[month",
"tests/test_bib_manager.py::test_Bib_lower_than_no_author",
"tests/test_bib_manager.py::test_Bib_lower_than_both_no_author",
"tests/test_bib_manager.py::test_Bib_lower_than_no_year",
"tests/test_bib_manager.py::test_Bib_equal_no_author",
"tests/test_bib_manager.py::test_Bib_equal_both_no_author",
"tests/test_bib_manager.py::test_Bib_not_equal_both_no_author",
"tests/test_bib_manager.py::test_Bib_not_equal_no_year",
"tests/test_bib_manager.py::test_Bib_equal_no_year",
"tests/test_bib_manager.py::test_Bib_meta",
"tests/test_bib_manager.py::test_Bib_warning_year",
"tests/test_bib_manager.py::test_Bib_warning_month[15]",
"tests/test_bib_manager.py::test_Bib_warning_month[tuesday]",
"tests/test_bib_manager.py::test_Bib_warning_authors_comma_typo",
"tests/test_bib_manager.py::test_Bib_warning_authors_missing_and",
"tests/test_bib_manager.py::test_remove_duplicates_no_duplicates",
"tests/test_bib_manager.py::test_remove_duplicates_identical",
"tests/test_bib_manager.py::test_remove_duplicates_diff_published",
"tests/test_bib_manager.py::test_remove_duplicates_query[mock_input0]",
"tests/test_bib_manager.py::test_filter_field_no_conflict",
"tests/test_bib_manager.py::test_filter_field_take_published",
"tests/test_bib_manager.py::test_filter_field_take_old",
"tests/test_bib_manager.py::test_filter_field_take_new",
"tests/test_bib_manager.py::test_filter_field_take_ask[mock_input0]",
"tests/test_bib_manager.py::test_filter_field_take_ask2[mock_input0]",
"tests/test_bib_manager.py::test_read_file_bibfile",
"tests/test_bib_manager.py::test_read_file_text",
"tests/test_bib_manager.py::test_read_file_single_line_entry",
"tests/test_bib_manager.py::test_read_file_ignore_comment",
"tests/test_bib_manager.py::test_read_file_ignore_comment_no_commas",
"tests/test_bib_manager.py::test_read_file_meta",
"tests/test_bib_manager.py::test_read_file_pdf_with_path",
"tests/test_bib_manager.py::test_read_file_pdf_with_bad_path",
"tests/test_bib_manager.py::test_read_file_error_bad_format",
"tests/test_bib_manager.py::test_read_file_error_open_end",
"tests/test_bib_manager.py::test_save",
"tests/test_bib_manager.py::test_load",
"tests/test_bib_manager.py::test_load_filed",
"tests/test_bib_manager.py::test_find_key",
"tests/test_bib_manager.py::test_find_bibcode",
"tests/test_bib_manager.py::test_find_key_bibcode",
"tests/test_bib_manager.py::test_find_key_not_found",
"tests/test_bib_manager.py::test_find_bibcode_not_found",
"tests/test_bib_manager.py::test_find_bibs",
"tests/test_bib_manager.py::test_find_no_arguments",
"tests/test_bib_manager.py::test_get_version_older",
"tests/test_bib_manager.py::test_get_version_no_pickle",
"tests/test_bib_manager.py::test_get_version_existing",
"tests/test_bib_manager.py::test_get_version_filed",
"tests/test_bib_manager.py::test_export_home",
"tests/test_bib_manager.py::test_export_no_overwrite",
"tests/test_bib_manager.py::test_export_meta",
"tests/test_bib_manager.py::test_export_no_meta",
"tests/test_bib_manager.py::test_merge_bibfile",
"tests/test_bib_manager.py::test_merge_bibs",
"tests/test_bib_manager.py::test_merge_no_new",
"tests/test_bib_manager.py::test_merge_base",
"tests/test_bib_manager.py::test_merge_bibs_no_titles",
"tests/test_bib_manager.py::test_merge_duplicate_key_ingnore[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_key_rename[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_title_ignore[mock_input0]",
"tests/test_bib_manager.py::test_merge_duplicate_title_add[mock_input0]",
"tests/test_bib_manager.py::test_duplicate_isbn_same_unknown_doi[mock_input0]",
"tests/test_bib_manager.py::test_init_from_scratch",
"tests/test_bib_manager.py::test_add_entries_dry[mock_prompt0]",
"tests/test_bib_manager.py::test_add_entries[mock_prompt0]",
"tests/test_bib_manager.py::test_search_author_lastname",
"tests/test_bib_manager.py::test_search_author_last_initials",
"tests/test_bib_manager.py::test_search_author_first",
"tests/test_bib_manager.py::test_search_author_multiple",
"tests/test_bib_manager.py::test_search_author_year_title",
"tests/test_bib_manager.py::test_search_title_multiple",
"tests/test_bib_manager.py::test_search_title_entry_without_title",
"tests/test_bib_manager.py::test_search_year_specific",
"tests/test_bib_manager.py::test_search_year_range",
"tests/test_bib_manager.py::test_search_bibcode",
"tests/test_bib_manager.py::test_search_bibcode_utf8",
"tests/test_bib_manager.py::test_search_bibcode_multiple",
"tests/test_bib_manager.py::test_search_key",
"tests/test_bib_manager.py::test_search_key_multiple",
"tests/test_bib_manager.py::test_prompt_search_kw1[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_kw2[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_extra[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_empty_prompt[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_empty_value[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_blank_value[mock_prompt_session0]",
"tests/test_bib_manager.py::test_prompt_search_double_def[mock_prompt_session0]"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-06-12 17:21:31+00:00
|
mit
| 4,482
|
|
netsiphd__netrd-264
|
diff --git a/netrd/utilities/entropy.py b/netrd/utilities/entropy.py
index dff68e2..9d6f3d7 100644
--- a/netrd/utilities/entropy.py
+++ b/netrd/utilities/entropy.py
@@ -15,7 +15,7 @@ from scipy.stats import entropy as sp_entropy
def js_divergence(P, Q):
- """Jenson-Shannon divergence between `P` and `Q`.
+ """Jensen-Shannon divergence between `P` and `Q`.
Parameters
----------
@@ -35,8 +35,10 @@ def js_divergence(P, Q):
return 0.5 * (sp_entropy(P, M, base=2) + sp_entropy(Q, M, base=2))
-def entropy(var):
- """Return the Shannon entropy of a variable.
+def entropy_from_seq(var):
+ """Return the Shannon entropy of a variable. This differs from
+ Scipy's entropy by taking a sequence of observations as input
+ rather than a histogram or probability distribution.
Parameters
----------
@@ -65,7 +67,7 @@ def joint_entropy(data):
Returns
-------
float
- Joint entrpoy of the variables of interests.
+ Joint entropy of the variables of interests.
Notes
-----
|
netsiphd/netrd
|
fa2c163376a88ed72ba15649190b1a2b23b1cb9a
|
diff --git a/tests/test_utilities.py b/tests/test_utilities.py
index 8837207..5b2ab79 100644
--- a/tests/test_utilities.py
+++ b/tests/test_utilities.py
@@ -8,7 +8,7 @@ Test utility functions.
import numpy as np
from netrd.utilities.entropy import categorized_data
-from netrd.utilities.entropy import entropy, joint_entropy, conditional_entropy
+from netrd.utilities.entropy import entropy_from_seq, joint_entropy, conditional_entropy
from netrd.utilities import threshold
@@ -89,7 +89,7 @@ def test_entropies():
"""
data = np.array([[1, 0, 0, 1, 1, 0, 1, 0], [0, 1, 0, 1, 1, 0, 1, 0]]).T
- H = entropy(data[:, 0])
+ H = entropy_from_seq(data[:, 0])
H_joint = joint_entropy(data)
H_cond = conditional_entropy(data[:, 1, np.newaxis], data[:, 0, np.newaxis])
|
Clarify usage of utilities.entropy.entropy?
When I was refactoring the Dmeasure code, I looked at using our version of entropy instead of scipy's, but one is not a drop-in replacement for the other because they take different inputs - one takes a histogram and the other takes a sequence of values. It would touch a lot of files, but renaming our `entropy` to something like `entropy_from_sequence` would remove this ambiguity.
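For illustration, a minimal sketch of the two input conventions, using only numpy and scipy (the observation sequence is borrowed from the test data above; nothing here calls netrd directly):

```python
import numpy as np
from scipy.stats import entropy as sp_entropy

seq = np.array([1, 0, 0, 1, 1, 0, 1, 0])        # raw observations (the "sequence" form)

# scipy's entropy wants a histogram / probability vector, not the sequence itself:
_, counts = np.unique(seq, return_counts=True)  # -> array([4, 4])
h = sp_entropy(counts, base=2)                  # counts are normalised internally
assert np.isclose(h, 1.0)                       # two equiprobable symbols -> 1 bit
```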
|
0.0
|
fa2c163376a88ed72ba15649190b1a2b23b1cb9a
|
[
"tests/test_utilities.py::test_thresholds",
"tests/test_utilities.py::test_categorized_data",
"tests/test_utilities.py::test_entropies"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2019-10-14 15:14:50+00:00
|
mit
| 4,130
|
|
M3t0r__tpl-12
|
diff --git a/tpl/__init__.py b/tpl/__init__.py
index 96b6c41..32297a0 100644
--- a/tpl/__init__.py
+++ b/tpl/__init__.py
@@ -61,15 +61,19 @@ def main(*args):
for data in loaded_data:
collated_data = merge_data(collated_data, data)
+ # set up Jinja2 environment
+ j_env = jinja2.Environment(
+ keep_trailing_newline=True
+ )
+
# create template
with open_file(arguments[0]) as template_stream:
- template = jinja2.Template(template_stream.read())
+ template = j_env.from_string(template_stream.read())
template.filename = arguments[0]
# and render to output
with open_file(arguments[1], "w") as output:
template.stream(collated_data).dump(output)
- output.write("\n") # does the template eat this or the dump call?
return os.EX_OK
|
M3t0r/tpl
|
b534fa59fb808b10031869fe51f5e6382a1055dd
|
diff --git a/tests/cli/test_faulty_invocations.py b/tests/cli/test_faulty_invocations.py
index d81fbd8..d538ec6 100644
--- a/tests/cli/test_faulty_invocations.py
+++ b/tests/cli/test_faulty_invocations.py
@@ -9,7 +9,7 @@ def test_key_does_not_exist(cli):
cli.path_for_content("{{FOO}}"),
env={}
)
- assert p.stdout == "\n"
+ assert p.stdout == ""
def test_corrupt_yaml(cli):
diff --git a/tests/cli/test_standard_usecases.py b/tests/cli/test_standard_usecases.py
index c3c9426..90d0c68 100644
--- a/tests/cli/test_standard_usecases.py
+++ b/tests/cli/test_standard_usecases.py
@@ -3,12 +3,12 @@ from . import cli
def test_source_environment(cli):
p = cli("-e", cli.path_for_content("{{FOO}}"), env={"FOO": "bar"})
- assert p.stdout == "bar\n"
+ assert p.stdout == "bar"
def test_unicode_var(cli):
p = cli("-e", cli.path_for_content("{{FOO}}"), env={"FOO": "🐍"})
- assert p.stdout == "🐍\n"
+ assert p.stdout == "🐍"
def test_shadowing_json_env(cli):
@@ -18,7 +18,7 @@ def test_shadowing_json_env(cli):
cli.path_for_content("{{FOO}}"),
env={"FOO": "env"}
)
- assert p.stdout == "env\n"
+ assert p.stdout == "env"
def test_shadowing_yaml_env(cli):
@@ -28,7 +28,7 @@ def test_shadowing_yaml_env(cli):
cli.path_for_content("{{FOO}}"),
env={"FOO": "env"}
)
- assert p.stdout == "env\n"
+ assert p.stdout == "env"
def test_yaml_flow_style(cli):
@@ -36,7 +36,7 @@ def test_yaml_flow_style(cli):
"--yaml", cli.path_for_content('{"FOO": "yaml"}'),
cli.path_for_content("{{FOO}}")
)
- assert p.stdout == "yaml\n"
+ assert p.stdout == "yaml"
def test_environment_by_default(cli):
@@ -44,7 +44,7 @@ def test_environment_by_default(cli):
cli.path_for_content("{{FOO}}"),
env={"FOO": "bar"}
)
- assert p.stdout == "bar\n"
+ assert p.stdout == "bar"
def test_sub_dict_shadowing(cli):
@@ -53,7 +53,7 @@ def test_sub_dict_shadowing(cli):
"--json", cli.path_for_json({"FOO": {"BAR": "second"}}),
cli.path_for_content("{{FOO['BAR']}}")
)
- assert p.stdout == "second\n"
+ assert p.stdout == "second"
def test_sub_dict_merging(cli):
@@ -62,7 +62,7 @@ def test_sub_dict_merging(cli):
"--json", cli.path_for_json({"merge": {"BAR": "bar"}}),
cli.path_for_content("{{merge['FOO']}}{{merge['BAR']}}")
)
- assert p.stdout == "foobar\n"
+ assert p.stdout == "foobar"
def test_second_sub_dict_shadowing(cli):
@@ -71,7 +71,7 @@ def test_second_sub_dict_shadowing(cli):
"--json", cli.path_for_json({"merge": {"deeper": {"overwritten": "bar"}}}),
cli.path_for_content("{{merge.deeper.overwritten}}")
)
- assert p.stdout == "bar\n"
+ assert p.stdout == "bar"
def test_second_sub_dict_merging(cli):
@@ -80,7 +80,7 @@ def test_second_sub_dict_merging(cli):
"--json", cli.path_for_json({"merge": {"deeper": {"BAR": "bar"}}}),
cli.path_for_content("{{merge.deeper.FOO}}{{merge.deeper.BAR}}")
)
- assert p.stdout == "foobar\n"
+ assert p.stdout == "foobar"
def test_shadowing_of_dict(cli):
@@ -89,4 +89,29 @@ def test_shadowing_of_dict(cli):
"--json", cli.path_for_json({"merge": 'bar'}),
cli.path_for_content("{{merge}}")
)
+ assert p.stdout == "bar"
+
+
+def test_keep_no_newline_at_end(cli):
+ p = cli(cli.path_for_content("{{FOO}}"), env={"FOO": "bar"})
+ assert p.stdout == "bar"
+
+
+def test_keep_one_newline_at_end(cli):
+ p = cli(cli.path_for_content("{{FOO}}\n"), env={"FOO": "bar"})
assert p.stdout == "bar\n"
+
+
+def test_keep_two_newlines_at_end(cli):
+ p = cli(cli.path_for_content("{{FOO}}\n\n"), env={"FOO": "bar"})
+ assert p.stdout == "bar\n\n"
+
+
+def test_keep_one_newline_at_beginning(cli):
+ p = cli(cli.path_for_content("\n{{FOO}}"), env={"FOO": "bar"})
+ assert p.stdout == "\nbar"
+
+
+def test_keep_two_newlines_at_beginning(cli):
+ p = cli(cli.path_for_content("\n\n{{FOO}}"), env={"FOO": "bar"})
+ assert p.stdout == "\n\nbar"
|
Fix trailing newline issue
In https://github.com/M3t0r/tpl/blob/feceeed182f1c2553b827d8f431f6be800204250/tpl/__init__.py#L72 `tpl` always adds a trailing newline instead of respecting what the template has. I added this to have nicer output on the command line, but it turns out Jinja has a setting that controls whether it should keep the last newline, if any. It's called `keep_trailing_newline` and is part of the [`jinja2.Environment`](http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment).
We should add tests to see if `tpl` only prints a newline if one is actually present in the template.
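As a small sketch of the Jinja2 setting involved, independent of tpl's own wrappers:

```python
import jinja2

# Default environment: Jinja2 strips a single trailing newline from the template.
default_env = jinja2.Environment()
assert default_env.from_string("{{ FOO }}\n").render(FOO="bar") == "bar"

# With keep_trailing_newline, the template's own newline (if present) survives.
keeping_env = jinja2.Environment(keep_trailing_newline=True)
assert keeping_env.from_string("{{ FOO }}\n").render(FOO="bar") == "bar\n"
assert keeping_env.from_string("{{ FOO }}").render(FOO="bar") == "bar"
```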
|
0.0
|
b534fa59fb808b10031869fe51f5e6382a1055dd
|
[
"tests/cli/test_faulty_invocations.py::test_key_does_not_exist",
"tests/cli/test_standard_usecases.py::test_source_environment",
"tests/cli/test_standard_usecases.py::test_unicode_var",
"tests/cli/test_standard_usecases.py::test_shadowing_json_env",
"tests/cli/test_standard_usecases.py::test_shadowing_yaml_env",
"tests/cli/test_standard_usecases.py::test_yaml_flow_style",
"tests/cli/test_standard_usecases.py::test_environment_by_default",
"tests/cli/test_standard_usecases.py::test_sub_dict_shadowing",
"tests/cli/test_standard_usecases.py::test_sub_dict_merging",
"tests/cli/test_standard_usecases.py::test_second_sub_dict_shadowing",
"tests/cli/test_standard_usecases.py::test_second_sub_dict_merging",
"tests/cli/test_standard_usecases.py::test_shadowing_of_dict",
"tests/cli/test_standard_usecases.py::test_keep_no_newline_at_end",
"tests/cli/test_standard_usecases.py::test_keep_one_newline_at_beginning",
"tests/cli/test_standard_usecases.py::test_keep_two_newlines_at_beginning"
] |
[
"tests/cli/test_faulty_invocations.py::test_corrupt_yaml",
"tests/cli/test_faulty_invocations.py::test_corrupt_json",
"tests/cli/test_standard_usecases.py::test_keep_one_newline_at_end",
"tests/cli/test_standard_usecases.py::test_keep_two_newlines_at_end"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-10-12 19:00:06+00:00
|
mit
| 330
|
|
datafolklabs__cement-559
|
diff --git a/cement/ext/ext_configparser.py b/cement/ext/ext_configparser.py
index 587e89e..0aa8f0d 100644
--- a/cement/ext/ext_configparser.py
+++ b/cement/ext/ext_configparser.py
@@ -152,12 +152,12 @@ class ConfigParserConfigHandler(config.ConfigHandler, RawConfigParser):
env_var = re.sub('[^0-9a-zA-Z]+', '_', env_var)
return env_var
- def get(self, section, key):
+ def get(self, section, key, **kwargs):
env_var = self._get_env_var(section, key)
if env_var in os.environ.keys():
return os.environ[env_var]
else:
- return RawConfigParser.get(self, section, key)
+ return RawConfigParser.get(self, section, key, **kwargs)
def has_section(self, section):
return RawConfigParser.has_section(self, section)
|
datafolklabs/cement
|
775fc4d933a4674f131418671c87f79944778e13
|
diff --git a/tests/ext/test_ext_configparser.py b/tests/ext/test_ext_configparser.py
index b0ed755..e5bf5fa 100644
--- a/tests/ext/test_ext_configparser.py
+++ b/tests/ext/test_ext_configparser.py
@@ -54,3 +54,15 @@ def test_env_var_override():
assert app.config.get('dummy', 'foo') == 'dummy-not-bar'
section_dict = app.config.get_section_dict('dummy')
assert section_dict['foo'] == 'dummy-not-bar'
+
+
+def test_get_boolean():
+ with TestApp(config_section='testapp') as app:
+ app.config.set('testapp', 'foobool', 'true')
+ assert app.config['testapp'].getboolean('foobool') is True
+
+ app.config.set('testapp', 'foobool', 'no')
+ assert app.config['testapp'].getboolean('foobool') is False
+
+ os.environ['TESTAPP_FOOBOOL'] = '1'
+ assert app.config['testapp'].getboolean('foobool') is True
|
Configparser 'getboolean' exception
**System Information**
- Cement Version: 3.0.0
- Python Version: 3.6.8
- Operating System and Version: Linux Mint 19.1
**Steps to Reproduce (Bugs Only)**
- Create a boolean setting: configparser only supports string values so this has to be a string representation of a boolean. [According to configparser](https://docs.python.org/3/library/configparser.html#supported-datatypes), this includes
> 'yes'/'no', 'on'/'off', 'true'/'false' and '1'/'0'
- Access this value using the `getboolean` method recommended by configparser. This should automatically convert the string value to a bool. Example usage:
`if self.app.config['hydra'].getboolean('validator_metrics'):`
- Exception:
```Traceback (most recent call last):
File "/home/adam/.pyenv/versions/hydra/bin/hydra", line 11, in <module>
load_entry_point('hydra', 'console_scripts', 'hydra')()
File "/home/adam/PycharmProjects/hydra/hydra/main.py", line 152, in main
app.run()
File "/home/adam/.pyenv/versions/hydra/lib/python3.6/site-packages/cement-3.0.0-py3.6.egg/cement/core/foundation.py", line 916, in run
return_val = self.controller._dispatch()
File "/home/adam/.pyenv/versions/hydra/lib/python3.6/site-packages/cement-3.0.0-py3.6.egg/cement/ext/ext_argparse.py", line 806, in _dispatch
return func()
File "/home/adam/PycharmProjects/hydra/hydra/controllers/client.py", line 621, in enable_metrics
self.app.client.configure_metrics()
File "/home/adam/PycharmProjects/hydra/hydra/helpers/client.py", line 275, in configure_metrics
if self.app.config['hydra'].getboolean('validator_metrics'):
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 1283, in get
fallback=fallback, **kwargs)
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 829, in getboolean
raw=raw, vars=vars, fallback=fallback, **kwargs)
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 809, in _get_conv
**kwargs)
File "/home/adam/.pyenv/versions/3.6.8/lib/python3.6/configparser.py", line 803, in _get
return conv(self.get(section, option, **kwargs))
TypeError: get() got an unexpected keyword argument 'raw'
```
I am thinking this is due to the fact that cement does not pass kwargs to configparser's wrapped `get` method. I am going to attempt making a PR to address this issue.
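A minimal sketch of the forwarding pattern outside of cement (the class name is made up; it only shows why the wrapping `get` has to accept `**kwargs`):

```python
from configparser import RawConfigParser

class ForwardingParser(RawConfigParser):
    # getboolean()/getint()/getfloat() call self.get(section, option, raw=..., vars=...),
    # so an overriding get() must forward those keyword arguments.
    def get(self, section, key, **kwargs):
        return RawConfigParser.get(self, section, key, **kwargs)

p = ForwardingParser()
p.add_section("testapp")
p.set("testapp", "foobool", "yes")
assert p.getboolean("testapp", "foobool") is True
```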
|
0.0
|
775fc4d933a4674f131418671c87f79944778e13
|
[
"tests/ext/test_ext_configparser.py::test_get_boolean"
] |
[
"tests/ext/test_ext_configparser.py::TestConfigParserConfigHandler::test_subclassing",
"tests/ext/test_ext_configparser.py::test_get_dict",
"tests/ext/test_ext_configparser.py::test_env_var_override"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-05-07 15:16:20+00:00
|
bsd-3-clause
| 1,810
|
|
python-control__python-control-892
|
diff --git a/control/iosys.py b/control/iosys.py
index 78444f7..dca00d3 100644
--- a/control/iosys.py
+++ b/control/iosys.py
@@ -1862,7 +1862,7 @@ def input_output_response(
return TimeResponseData(
t_eval, y, None, u, issiso=sys.issiso(),
- output_labels=sys.output_index, input_labels=sys.input_index,
+ output_labels=sys.output_labels, input_labels=sys.input_labels,
transpose=transpose, return_x=return_x, squeeze=squeeze)
# Create a lambda function for the right hand side
@@ -1941,8 +1941,8 @@ def input_output_response(
return TimeResponseData(
soln.t, y, soln.y, u, issiso=sys.issiso(),
- output_labels=sys.output_index, input_labels=sys.input_index,
- state_labels=sys.state_index,
+ output_labels=sys.output_labels, input_labels=sys.input_labels,
+ state_labels=sys.state_labels,
transpose=transpose, return_x=return_x, squeeze=squeeze)
@@ -2881,7 +2881,7 @@ def interconnect(
# Look for the signal name as a system input
for sys in syslist:
- if signal_name in sys.input_index.keys():
+ if signal_name in sys.input_labels:
connection.append(sign + sys.name + "." + signal_name)
# Make sure we found the name
diff --git a/control/statesp.py b/control/statesp.py
index 41f92ae..8661d87 100644
--- a/control/statesp.py
+++ b/control/statesp.py
@@ -1777,7 +1777,9 @@ def _mimo2siso(sys, input, output, warn_conversion=False):
new_B = sys.B[:, input]
new_C = sys.C[output, :]
new_D = sys.D[output, input]
- sys = StateSpace(sys.A, new_B, new_C, new_D, sys.dt)
+ sys = StateSpace(sys.A, new_B, new_C, new_D, sys.dt,
+ name=sys.name,
+ inputs=sys.input_labels[input], outputs=sys.output_labels[output])
return sys
@@ -1826,7 +1828,9 @@ def _mimo2simo(sys, input, warn_conversion=False):
# Y = C*X + D*U
new_B = sys.B[:, input:input+1]
new_D = sys.D[:, input:input+1]
- sys = StateSpace(sys.A, new_B, sys.C, new_D, sys.dt)
+ sys = StateSpace(sys.A, new_B, sys.C, new_D, sys.dt,
+ name=sys.name,
+ inputs=sys.input_labels[input], outputs=sys.output_labels)
return sys
diff --git a/control/timeresp.py b/control/timeresp.py
index 638a073..bd8595c 100644
--- a/control/timeresp.py
+++ b/control/timeresp.py
@@ -694,7 +694,10 @@ def _process_labels(labels, signal, length):
raise ValueError("Name dictionary for %s is incomplete" % signal)
# Convert labels to a list
- labels = list(labels)
+ if isinstance(labels, str):
+ labels = [labels]
+ else:
+ labels = list(labels)
# Make sure the signal list is the right length and type
if len(labels) != length:
@@ -1111,6 +1114,8 @@ def forced_response(sys, T=None, U=0., X0=0., transpose=False,
return TimeResponseData(
tout, yout, xout, U, issiso=sys.issiso(),
+ output_labels=sys.output_labels, input_labels=sys.input_labels,
+ state_labels=sys.state_labels,
transpose=transpose, return_x=return_x, squeeze=squeeze)
@@ -1374,8 +1379,16 @@ def step_response(sys, T=None, X0=0., input=None, output=None, T_num=None,
# Figure out if the system is SISO or not
issiso = sys.issiso() or (input is not None and output is not None)
+ # Select only the given input and output, if any
+ input_labels = sys.input_labels if input is None \
+ else sys.input_labels[input]
+ output_labels = sys.output_labels if output is None \
+ else sys.output_labels[output]
+
return TimeResponseData(
response.time, yout, xout, uout, issiso=issiso,
+ output_labels=output_labels, input_labels=input_labels,
+ state_labels=sys.state_labels,
transpose=transpose, return_x=return_x, squeeze=squeeze)
@@ -1704,9 +1717,15 @@ def initial_response(sys, T=None, X0=0., input=0, output=None, T_num=None,
# Figure out if the system is SISO or not
issiso = sys.issiso() or (input is not None and output is not None)
+ # Select only the given output, if any
+ output_labels = sys.output_labels if output is None \
+ else sys.output_labels[0]
+
# Store the response without an input
return TimeResponseData(
response.t, response.y, response.x, None, issiso=issiso,
+ output_labels=output_labels, input_labels=None,
+ state_labels=sys.state_labels,
transpose=transpose, return_x=return_x, squeeze=squeeze)
@@ -1798,7 +1817,7 @@ def impulse_response(sys, T=None, X0=0., input=None, output=None, T_num=None,
-----
This function uses the `forced_response` function to compute the time
response. For continuous time systems, the initial condition is altered to
- account for the initial impulse. For discrete-time aystems, the impulse is
+ account for the initial impulse. For discrete-time aystems, the impulse is
sized so that it has unit area.
Examples
@@ -1869,8 +1888,16 @@ def impulse_response(sys, T=None, X0=0., input=None, output=None, T_num=None,
# Figure out if the system is SISO or not
issiso = sys.issiso() or (input is not None and output is not None)
+ # Select only the given input and output, if any
+ input_labels = sys.input_labels if input is None \
+ else sys.input_labels[input]
+ output_labels = sys.output_labels if output is None \
+ else sys.output_labels[output]
+
return TimeResponseData(
response.time, yout, xout, uout, issiso=issiso,
+ output_labels=output_labels, input_labels=input_labels,
+ state_labels=sys.state_labels,
transpose=transpose, return_x=return_x, squeeze=squeeze)
|
python-control/python-control
|
9c26e2214f82e592b5b64cf8f581ac14c198a46f
|
diff --git a/control/tests/trdata_test.py b/control/tests/trdata_test.py
index 734d355..028e535 100644
--- a/control/tests/trdata_test.py
+++ b/control/tests/trdata_test.py
@@ -196,15 +196,20 @@ def test_response_copy():
with pytest.raises(ValueError, match="not enough"):
t, y, x = response_mimo
- # Labels
- assert response_mimo.output_labels is None
- assert response_mimo.state_labels is None
- assert response_mimo.input_labels is None
+ # Make sure labels are transferred to the response
+ assert response_siso.output_labels == sys_siso.output_labels
+ assert response_siso.state_labels == sys_siso.state_labels
+ assert response_siso.input_labels == sys_siso.input_labels
+ assert response_mimo.output_labels == sys_mimo.output_labels
+ assert response_mimo.state_labels == sys_mimo.state_labels
+ assert response_mimo.input_labels == sys_mimo.input_labels
+
+ # Check relabelling
response = response_mimo(
output_labels=['y1', 'y2'], input_labels='u',
- state_labels=["x[%d]" % i for i in range(4)])
+ state_labels=["x%d" % i for i in range(4)])
assert response.output_labels == ['y1', 'y2']
- assert response.state_labels == ['x[0]', 'x[1]', 'x[2]', 'x[3]']
+ assert response.state_labels == ['x0', 'x1', 'x2', 'x3']
assert response.input_labels == ['u']
# Unknown keyword
@@ -231,6 +236,17 @@ def test_trdata_labels():
np.testing.assert_equal(
response.input_labels, ["u[%d]" % i for i in range(sys.ninputs)])
+ # Make sure the selected input and output are both correctly transferred to the response
+ for nu in range(sys.ninputs):
+ for ny in range(sys.noutputs):
+ step_response = ct.step_response(sys, T, input=nu, output=ny)
+ assert step_response.input_labels == [sys.input_labels[nu]]
+ assert step_response.output_labels == [sys.output_labels[ny]]
+
+ init_response = ct.initial_response(sys, T, input=nu, output=ny)
+ assert init_response.input_labels == None
+ assert init_response.output_labels == [sys.output_labels[ny]]
+
def test_trdata_multitrace():
#
|
Missing labels from forced_response output
Hi,
I wrote a state-space system representation with labels, like:
``` Python
sys = ct.ss(
A, B, C, D,
name="motor",
states=("Ia", "Wm"),
inputs=("Va", "Tl"),
outputs=("Ia", "Wm", "Va", "Tl"),
)
```
But after simulating it, the result (a `TimeResponseData`), lacks information about the labels of the simulated system:
``` Python
T = np.linspace(0, 0.360, 360)
Va = np.ones_like(T) * 36
Tl = np.linspace(0, 15, len(T))
res = ct.forced_response(motor, T, U=[Va, Tl], X0=[0, 0])
print(res.output_labels) # this is None
print(res.input_labels) # this is None
print(res.state_labels) # this is None
```
I wanted to use `.to_pandas()`, and after assigning it manually, it works, so it's probably just a matter of passing it forward internally in the code:
``` Python
res.output_labels = motor.output_labels
res.input_labels = motor.input_labels
res.state_labels = motor.state_labels
df = res.to_pandas().set_index("time")
```
For completeness, this is the error if I try to use `.to_pandas()` without the above-mentioned workaround:
``` Python
648 # Create a dict for setting up the data frame
649 data = {'time': self.time}
650 data.update(
--> 651 {name: self.u[i] for i, name in enumerate(self.input_labels)})
652 data.update(
653 {name: self.y[i] for i, name in enumerate(self.output_labels)})
654 data.update(
655 {name: self.x[i] for i, name in enumerate(self.state_labels)})
TypeError: 'NoneType' object is not iterable
```
Thanks
|
0.0
|
9c26e2214f82e592b5b64cf8f581ac14c198a46f
|
[
"control/tests/trdata_test.py::test_response_copy",
"control/tests/trdata_test.py::test_trdata_labels"
] |
[
"control/tests/trdata_test.py::test_trdata_shapes[1-1-None]",
"control/tests/trdata_test.py::test_trdata_shapes[1-1-True]",
"control/tests/trdata_test.py::test_trdata_shapes[1-1-False]",
"control/tests/trdata_test.py::test_trdata_shapes[1-2-None]",
"control/tests/trdata_test.py::test_trdata_shapes[1-2-True]",
"control/tests/trdata_test.py::test_trdata_shapes[1-2-False]",
"control/tests/trdata_test.py::test_trdata_shapes[2-1-None]",
"control/tests/trdata_test.py::test_trdata_shapes[2-1-True]",
"control/tests/trdata_test.py::test_trdata_shapes[2-1-False]",
"control/tests/trdata_test.py::test_trdata_shapes[2-3-None]",
"control/tests/trdata_test.py::test_trdata_shapes[2-3-True]",
"control/tests/trdata_test.py::test_trdata_shapes[2-3-False]",
"control/tests/trdata_test.py::test_trdata_multitrace",
"control/tests/trdata_test.py::test_trdata_exceptions"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-05-20 16:48:37+00:00
|
bsd-3-clause
| 5,071
|
|
OCA__oca-github-bot-59
|
diff --git a/HISTORY.rst b/HISTORY.rst
index 0f9ae33..08d2ad2 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -1,6 +1,10 @@
next
~~~~
+**Features**
+
+- Improved command parser (#53)
+
**Bug fixes**
- Do not attempt to build wheels for uninstallable addons.
diff --git a/src/oca_github_bot/commands.py b/src/oca_github_bot/commands.py
index e377d69..7aba1e7 100644
--- a/src/oca_github_bot/commands.py
+++ b/src/oca_github_bot/commands.py
@@ -6,7 +6,7 @@ import re
from .tasks import merge_bot
BOT_COMMAND_RE = re.compile(
- r"/ocabot +(?P<command>\w+)( +(?P<options>.*?))? *$", re.MULTILINE
+ r"/ocabot[ \t]+(?P<command>\w+)(?P<options>[ \t\w]*)(\W|\r?$)", re.MULTILINE
)
@@ -23,6 +23,7 @@ class InvalidOptionsError(Exception):
class BotCommand:
def __init__(self, name, options):
self.name = name
+ self.options = options
self.parse_options(options)
@classmethod
@@ -46,8 +47,8 @@ class BotCommandMerge(BotCommand):
def parse_options(self, options):
if not options:
return
- if options in ("major", "minor", "patch"):
- self.bumpversion = options
+ if len(options) == 1 and options[0] in ("major", "minor", "patch"):
+ self.bumpversion = options[0]
else:
raise InvalidOptionsError(self.name, options)
@@ -60,4 +61,6 @@ class BotCommandMerge(BotCommand):
def parse_commands(text):
""" Parse a text and return an iterator of BotCommand objects. """
for mo in BOT_COMMAND_RE.finditer(text):
- yield BotCommand.create(mo.group("command"), mo.group("options"))
+ yield BotCommand.create(
+ mo.group("command"), mo.group("options").strip().split()
+ )
|
OCA/oca-github-bot
|
0ab1c02e2ef25e2b3231a326e04c31b40710ce6f
|
diff --git a/tests/test_commands.py b/tests/test_commands.py
index 1195b53..2091988 100644
--- a/tests/test_commands.py
+++ b/tests/test_commands.py
@@ -22,11 +22,38 @@ def test_parse_command_multi():
/ocabot merge major
/ocabot merge patch
/ocabot merge patch
+ /ocabot merge, please
+ /ocabot merge minor, please
+ /ocabot merge minor, please
+ /ocabot merge.
+ /ocabot merge patch. blah
+ /ocabot merge minor # ignored
...
"""
)
)
- assert len(cmds) == 3
+ assert [(cmd.name, cmd.options) for cmd in cmds] == [
+ ("merge", ["major"]),
+ ("merge", ["patch"]),
+ ("merge", ["patch"]),
+ ("merge", []),
+ ("merge", ["minor"]),
+ ("merge", ["minor"]),
+ ("merge", []),
+ ("merge", ["patch"]),
+ ("merge", ["minor"]),
+ ]
+
+
+def test_parse_command_2():
+ cmds = list(
+ parse_commands(
+ "Great contribution, thanks!\r\n\r\n"
+ "/ocabot merge\r\n\r\n"
+ "Please forward port it to 12.0."
+ )
+ )
+ assert [(cmd.name, cmd.options) for cmd in cmds] == [("merge", [])]
def test_parse_command_merge():
|
merge bot: parse command inside a greater message
For now, the merge bot only reacts to the merge command when it is posted as an isolated comment (example of a command embedded in a larger message: https://github.com/OCA/account-invoicing/pull/552#issuecomment-507203903).
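For reference, the regular expression from the patch applied to a comment that embeds the command in a larger message (the comment text is hypothetical):

```python
import re

# Pattern taken from the patch above.
BOT_COMMAND_RE = re.compile(
    r"/ocabot[ \t]+(?P<command>\w+)(?P<options>[ \t\w]*)(\W|\r?$)", re.MULTILINE
)

comment = "Great contribution, thanks!\r\n\r\n/ocabot merge minor\r\n\r\nPlease forward port it to 12.0."
commands = [(m.group("command"), m.group("options").strip().split())
            for m in BOT_COMMAND_RE.finditer(comment)]
assert commands == [("merge", ["minor"])]
```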
|
0.0
|
0ab1c02e2ef25e2b3231a326e04c31b40710ce6f
|
[
"tests/test_commands.py::test_parse_command_multi",
"tests/test_commands.py::test_parse_command_2"
] |
[
"tests/test_commands.py::test_parse_command_not_a_command",
"tests/test_commands.py::test_parse_command_merge"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-07-31 11:18:29+00:00
|
mit
| 401
|
|
michaelbukachi__flask-vuesfc-4
|
diff --git a/README.rst b/README.rst
index 9f1ab59..5cd44a6 100644
--- a/README.rst
+++ b/README.rst
@@ -122,6 +122,13 @@ escape it. So be sure to always use the ``safe`` filter when rendering the compo
Feel free to checkout the examples folder for other examples.
+Escaping
+--------
+Occasionally, you might need a variable to be parsed by ``jinja`` instead of ``vue``. To achieve this, use triple
+parenthesis i.e ``{{{ variable }}}``.
+
+Checkout **examples/app_with_escaping.py**.
+
--------------
Configuration
--------------
diff --git a/example/app_with_escaping.py b/example/app_with_escaping.py
new file mode 100644
index 0000000..9be1b30
--- /dev/null
+++ b/example/app_with_escaping.py
@@ -0,0 +1,27 @@
+from flask import Flask, render_template
+
+from flask_vue_sfc import VueSFC
+from flask_vue_sfc.helpers import render_vue_component
+
+
+class Config:
+ SECRET_KEY = 'some-very-long-secret'
+
+
+def create_app():
+ app = Flask(__name__)
+ app.config.from_object(Config)
+
+ VueSFC(app)
+
+ @app.route('/')
+ def example1():
+ component = render_vue_component('index4.vue', message='This is rendered by flask')
+ return render_template('example.html', component=component)
+
+ return app
+
+
+if __name__ == '__main__':
+ application = create_app()
+ application.run(debug=True)
diff --git a/example/templates/index4.vue b/example/templates/index4.vue
new file mode 100644
index 0000000..5702999
--- /dev/null
+++ b/example/templates/index4.vue
@@ -0,0 +1,15 @@
+<template>
+ <div>{{ message }}</div>
+ <div>{{{ message }}}</div>
+</template>
+<script>
+
+export default {
+ name: 'App',
+ data() {
+ return {
+ message: 'This is rendered by Vue',
+ }
+ }
+}
+</script>
\ No newline at end of file
diff --git a/flask_vue_sfc/helpers.py b/flask_vue_sfc/helpers.py
index b7b6bb0..ecc10de 100644
--- a/flask_vue_sfc/helpers.py
+++ b/flask_vue_sfc/helpers.py
@@ -10,10 +10,11 @@ def _create_random_id():
return 'vue-sfc-' + secrets.token_hex(6)
-def _load_template(template_name):
+def _load_template(template_name, **context):
ctx = _app_ctx_stack.top
+ ctx.app.update_template_context(context)
t = ctx.app.jinja_env.get_or_select_template(template_name)
- vue = t.render()
+ vue = t.render(context)
parsed = ctx.g.v8.call('VueTemplateCompiler.parseComponent', vue)
component = {
@@ -25,7 +26,7 @@ def _load_template(template_name):
return component
-def _render_component(template_name):
+def _render_component(template_name, **context):
ctx = _app_ctx_stack.top
if 'sfc_cache' in ctx.g:
@@ -33,7 +34,7 @@ def _render_component(template_name):
if sfc:
return sfc
- src = _load_template(template_name)
+ src = _load_template(template_name, **context)
component = VueComponent(src, _create_random_id, _load_template)
sfc = component.render(ctx.g.v8)
sfc = str(sfc)
@@ -46,11 +47,12 @@ def _render_component(template_name):
def render_vue_component(template_name, **context):
is_page = context.get('is_page', False)
- component = _render_component(template_name)
+ component = _render_component(template_name, **context)
if is_page:
return render_template('page.html', component=component)
return component
-def render_vue_page(template_name):
- return render_vue_component(template_name, is_page=True)
+def render_vue_page(template_name, **context):
+ context['is_page'] = True
+ return render_vue_component(template_name, **context)
diff --git a/flask_vue_sfc/utils.py b/flask_vue_sfc/utils.py
index 81871d9..f4c6bbc 100644
--- a/flask_vue_sfc/utils.py
+++ b/flask_vue_sfc/utils.py
@@ -17,7 +17,9 @@ class VueLoader(FileSystemLoader):
if template and template.lower().endswith('.vue'):
# We don't want jinja to touch {{ }}
contents, filename, uptodate = super(VueLoader, self).get_source(environment, template)
- contents = '{% raw %}\n' + contents.replace('</template>', '</template>\n{% endraw %}')
+ contents = _change_delimiters(contents)
+ # contents = '{% raw %}\n' + contents.replace('</template>', '</template>\n{% endraw %}')
+ # print(contents)
return contents, filename, uptodate
return super(VueLoader, self).get_source(environment, template)
@@ -234,10 +236,8 @@ class HtmlTemplate:
)
html = html_minify(html)
# Handler delimiters replacement to prevent conflicts with jinja
- if '{{' in html:
- html = html.replace('{{', '[[')
- html = html.replace('}}', ']]')
return html
+ # return _change_delimiters(html)
class ChildHtmlTemplate(HtmlTemplate):
@@ -255,10 +255,8 @@ class ChildHtmlTemplate(HtmlTemplate):
)
html = html_minify(html)
# Handler delimiters replacement to prevent conflicts with jinja
- if '{{' in html:
- html = html.replace('{{', '[[')
- html = html.replace('}}', ']]')
return html
+ # return _change_delimiters(html)
class CssStyling:
@@ -330,3 +328,17 @@ class VueChildComponent(ChildVueScript, ChildHtmlTemplate, CssStyling):
def _get_file_contents(path):
with open(path, 'r') as fp:
return fp.read()
+
+
+def _change_delimiters(html):
+ if '{{' in html:
+ html = html.replace('{{{', 'op_par')
+ html = html.replace('}}}', 'cl_par')
+
+ html = html.replace('{{', '[[')
+ html = html.replace('}}', ']]')
+
+ html = html.replace('op_par', '{{')
+ html = html.replace('cl_par', '}}')
+
+ return html
|
michaelbukachi/flask-vuesfc
|
6871322cb72f44bc45c8b88a358935289139e7f7
|
diff --git a/tests/test_utils.py b/tests/test_utils.py
index c4ba0d6..7044407 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,4 @@
-from flask_vue_sfc.utils import VueComponent, VueChildComponent, SFC
+from flask_vue_sfc.utils import VueComponent, VueChildComponent, SFC, _change_delimiters
def test_render_html():
@@ -185,3 +185,9 @@ def test_render_sfc__with_child():
"</style>\n"
)
assert str(sfc) == expected
+
+
+def test_change_delimiters__escape_syntax():
+ expected = '{{ test }}'
+ html = _change_delimiters('{{{ test }}}')
+ assert expected == html
|
Add support for escaping variable placeholders
Currently, the extension automatically converts `{{ }}` to `[[ ]]` so that it doesn't get parsed by Flask's template engine. This is convenient most of the time; however, there are times when we want the placeholder to be parsed by Flask instead of Vue. This issue proposes the introduction of triple parentheses `{{{ }}}` for such cases. When the converter meets triple parentheses, it converts them to double parentheses, which are then parsed by the Flask template engine.
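Based on the `_change_delimiters` helper introduced in the patch, the intended transformation looks roughly like this (a sketch, assuming the helper is importable as in the added test):

```python
from flask_vue_sfc.utils import _change_delimiters

# Double braces are rewritten to [[ ]] so Jinja leaves them for Vue,
# while triple braces collapse back to double braces for Jinja to render.
assert _change_delimiters('{{ vue_var }} and {{{ jinja_var }}}') == '[[ vue_var ]] and {{ jinja_var }}'
```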
|
0.0
|
6871322cb72f44bc45c8b88a358935289139e7f7
|
[
"tests/test_utils.py::test_render_html",
"tests/test_utils.py::test_render_child_html",
"tests/test_utils.py::test_render_css",
"tests/test_utils.py::test_render_sfc__no_child",
"tests/test_utils.py::test_render_sfc__with_child",
"tests/test_utils.py::test_change_delimiters__escape_syntax"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-01-11 12:37:41+00:00
|
mit
| 3,892
|
|
genericclient__genericclient-requests-14
|
diff --git a/genericclient/__init__.py b/genericclient/__init__.py
index fc0d66a..89af323 100644
--- a/genericclient/__init__.py
+++ b/genericclient/__init__.py
@@ -24,7 +24,7 @@ class Endpoint(BaseEndpoint):
data=self.api.hydrate_data(resp),
)
- if response.status_code == 403:
+ if response.status_code in (401, 403):
if self.api.session.auth:
msg = "Failed request to `{}`. Cannot authenticate user `{}` on the API.".format(
url, self.api.session.auth[0],
|
genericclient/genericclient-requests
|
8fba5f6101b7226a8a6a3e7fe8a02e22c6314cf9
|
diff --git a/tests/test_auth.py b/tests/test_auth.py
index c7a99cb..79c4ce9 100644
--- a/tests/test_auth.py
+++ b/tests/test_auth.py
@@ -8,6 +8,15 @@ MOCK_API_URL = 'http://dummy.org'
class AuthClientTestCase(TestCase):
+ def test_401(self):
+ generic_client = GenericClient(
+ url=MOCK_API_URL,
+ )
+ with responses.RequestsMock() as rsps:
+ rsps.add(responses.GET, MOCK_API_URL + '/users', status=401)
+ with self.assertRaises(generic_client.NotAuthenticatedError):
+ generic_client.users.all()
+
def test_403(self):
generic_client = GenericClient(
url=MOCK_API_URL,
@@ -17,7 +26,6 @@ class AuthClientTestCase(TestCase):
with self.assertRaises(generic_client.NotAuthenticatedError):
generic_client.users.all()
-
def test_403_auth(self):
generic_client = GenericClient(
url=MOCK_API_URL,
diff --git a/tests/test_endpoint.py b/tests/test_endpoint.py
index 848e0b5..d035401 100644
--- a/tests/test_endpoint.py
+++ b/tests/test_endpoint.py
@@ -126,6 +126,12 @@ class EndpointTestCase(TestCase):
self.assertRaises(generic_client.ResourceNotFound, generic_client.users.get, uuid=9999)
def test_unauthenticated(self):
+ with responses.RequestsMock() as rsps:
+ rsps.add(responses.GET, MOCK_API_URL + '/users', status=401)
+
+ with self.assertRaises(generic_client.NotAuthenticatedError):
+ generic_client.users.all()
+
with responses.RequestsMock() as rsps:
rsps.add(responses.GET, MOCK_API_URL + '/users', status=403)
|
Does not handle response status code 401
When a user sends a GET request to a DRF server using invalid basic authentication email/password values, a response status code of 401 is returned. The client does not raise an exception when this happens, which results in a None resource being returned. The client should raise NotAuthenticatedError for both 401 and 403 status codes. See https://www.django-rest-framework.org/api-guide/authentication/
|
0.0
|
8fba5f6101b7226a8a6a3e7fe8a02e22c6314cf9
|
[
"tests/test_auth.py::AuthClientTestCase::test_401",
"tests/test_endpoint.py::EndpointTestCase::test_unauthenticated"
] |
[
"tests/test_auth.py::AuthClientTestCase::test_403",
"tests/test_auth.py::AuthClientTestCase::test_403_auth",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_create",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_all",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_delete",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_get_params",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_filter",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_get_id",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_links",
"tests/test_endpoint.py::EndpointTestCase::test_endpoint_get_uuid"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-16 15:07:49+00:00
|
mit
| 2,440
|
|
peterbe__premailer-218
|
diff --git a/premailer/premailer.py b/premailer/premailer.py
index 344a6ad..37fd743 100644
--- a/premailer/premailer.py
+++ b/premailer/premailer.py
@@ -311,18 +311,18 @@ class Premailer(object):
return rules, leftover
def transform(self, html=None, pretty_print=True, **kwargs):
- """change the self.html and return it with CSS turned into style
+ """change the html and return it with CSS turned into style
attributes.
"""
- if html is not None:
- if self.html is not None:
- raise TypeError("Can't pass html argument twice")
- self.html = html
- elif self.html is None:
+ if html is not None and self.html is not None:
+ raise TypeError("Can't pass html argument twice")
+ elif html is None and self.html is None:
raise TypeError("must pass html as first argument")
- if hasattr(self.html, "getroottree"):
+ elif html is None:
+ html = self.html
+ if hasattr(html, "getroottree"):
# skip the next bit
- root = self.html.getroottree()
+ root = html.getroottree()
page = root
tree = root
else:
@@ -330,7 +330,7 @@ class Premailer(object):
parser = etree.XMLParser(ns_clean=False, resolve_entities=False)
else:
parser = etree.HTMLParser()
- stripped = self.html.strip()
+ stripped = html.strip()
tree = etree.fromstring(stripped, parser).getroottree()
page = tree.getroot()
# lxml inserts a doctype if none exists, so only include it in
@@ -379,6 +379,7 @@ class Premailer(object):
css_body = self._load_external(href)
these_rules, these_leftover = self._parse_style_rules(css_body, index)
+
index += 1
rules.extend(these_rules)
parent_of_element = element.getparent()
@@ -522,7 +523,7 @@ class Premailer(object):
continue
parent.attrib[attr] = urljoin(self.base_url, url)
- if hasattr(self.html, "getroottree"):
+ if hasattr(html, "getroottree"):
return root
else:
kwargs.setdefault("method", self.method)
|
peterbe/premailer
|
350af2440ccc9598d01841d1a22a8e5236da85a1
|
diff --git a/premailer/tests/test_premailer.py b/premailer/tests/test_premailer.py
index f0ef4ac..0ff7a37 100644
--- a/premailer/tests/test_premailer.py
+++ b/premailer/tests/test_premailer.py
@@ -215,6 +215,68 @@ class Tests(unittest.TestCase):
p = Premailer()
assert_raises(TypeError, p.transform)
+ def test_instance_reuse(self):
+ """test whether the premailer instance can be reused"""
+
+ html_1 = """<html>
+ <head>
+ <title>Title</title>
+ <style type="text/css">
+ h1, h2 { color:red; }
+ strong {
+ text-decoration:none
+ }
+ </style>
+ </head>
+ <body>
+ <h1>Hi!</h1>
+ <p><strong>Yes!</strong></p>
+ </body>
+ </html>"""
+
+ html_2 = """<html>
+ <head>
+ <title>Another Title</title>
+ <style type="text/css">
+ h1, h2 { color:blue; }
+ strong {
+ text-decoration:underline
+ }
+ </style>
+ </head>
+ <body>
+ <h1>Hello!</h1>
+ <p><strong>Nope!</strong></p>
+ </body>
+ </html>"""
+
+ expect_html_1 = """<html>
+ <head>
+ <title>Title</title>
+ </head>
+ <body>
+ <h1 style="color:red">Hi!</h1>
+ <p><strong style="text-decoration:none">Yes!</strong></p>
+ </body>
+ </html>"""
+
+ expect_html_2 = """<html>
+ <head>
+ <title>Another Title</title>
+ </head>
+ <body>
+ <h1 style="color:blue">Hello!</h1>
+ <p><strong style="text-decoration:underline">Nope!</strong></p>
+ </body>
+ </html>"""
+
+ p = Premailer()
+ result_html_1 = p.transform(html_1)
+ result_html_2 = p.transform(html_2)
+
+ compare_html(expect_html_1, result_html_1)
+ compare_html(expect_html_2, result_html_2)
+
def test_remove_classes(self):
"""test the simplest case"""
|
Reusing premailer instances does not work
The README contains a [nice section](https://github.com/peterbe/premailer#if-execution-speed-is-on-your-mind) on speeding up premailer by reusing premailer instances. This, however, [throws an exception](https://github.com/peterbe/premailer/blob/master/premailer/premailer.py#L319) on subsequent iterations because the instance's internal `html` attribute was already set on the first iteration.
Possible solutions:
**a)** Refactor code to allow for multiple runs
**b)** Remove problematic section from documentation
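With option (a), the reuse pattern from the README would work along these lines (a sketch; the HTML strings are hypothetical):

```python
from premailer import Premailer

p = Premailer()  # one instance, reused across documents
first = p.transform("<html><head><style>h1 {color: red}</style></head><body><h1>Hi</h1></body></html>")
second = p.transform("<html><head><style>h1 {color: blue}</style></head><body><h1>Bye</h1></body></html>")
assert "color:red" in first and "color:blue" in second
```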
|
0.0
|
350af2440ccc9598d01841d1a22a8e5236da85a1
|
[
"premailer/tests/test_premailer.py::Tests::test_instance_reuse"
] |
[
"premailer/tests/test_premailer.py::Tests::test_3_digit_color_expand",
"premailer/tests/test_premailer.py::Tests::test_align_float_images",
"premailer/tests/test_premailer.py::Tests::test_apple_newsletter_example",
"premailer/tests/test_premailer.py::Tests::test_base_url_fixer",
"premailer/tests/test_premailer.py::Tests::test_base_url_ignore_links",
"premailer/tests/test_premailer.py::Tests::test_base_url_with_path",
"premailer/tests/test_premailer.py::Tests::test_basic_html",
"premailer/tests/test_premailer.py::Tests::test_basic_html_argument_wrong",
"premailer/tests/test_premailer.py::Tests::test_basic_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_basic_html_with_pseudo_selector",
"premailer/tests/test_premailer.py::Tests::test_basic_xml",
"premailer/tests/test_premailer.py::Tests::test_broken_xml",
"premailer/tests/test_premailer.py::Tests::test_capture_cssutils_logging",
"premailer/tests/test_premailer.py::Tests::test_child_selector",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_argument",
"premailer/tests/test_premailer.py::Tests::test_command_line_fileinput_from_stdin",
"premailer/tests/test_premailer.py::Tests::test_command_line_preserve_style_tags",
"premailer/tests/test_premailer.py::Tests::test_comments_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_css_disable_basic_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_disable_leftover_css",
"premailer/tests/test_premailer.py::Tests::test_css_text",
"premailer/tests/test_premailer.py::Tests::test_css_text_with_only_body_present",
"premailer/tests/test_premailer.py::Tests::test_css_with_html_attributes",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_excluded",
"premailer/tests/test_premailer.py::Tests::test_css_with_pseudoclasses_included",
"premailer/tests/test_premailer.py::Tests::test_disabled_validator",
"premailer/tests/test_premailer.py::Tests::test_doctype",
"premailer/tests/test_premailer.py::Tests::test_empty_style_tag",
"premailer/tests/test_premailer.py::Tests::test_external_links",
"premailer/tests/test_premailer.py::Tests::test_external_links_disallow_network",
"premailer/tests/test_premailer.py::Tests::test_external_links_unfindable",
"premailer/tests/test_premailer.py::Tests::test_external_styles_and_links",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_http",
"premailer/tests/test_premailer.py::Tests::test_external_styles_on_https",
"premailer/tests/test_premailer.py::Tests::test_external_styles_with_base_url",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_class_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_element_over_generic",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_id_over_others",
"premailer/tests/test_premailer.py::Tests::test_favour_rule_with_important_over_others",
"premailer/tests/test_premailer.py::Tests::test_fontface_selectors_with_no_selectortext",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_external_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_incorrectly",
"premailer/tests/test_premailer.py::Tests::test_ignore_some_inline_stylesheets",
"premailer/tests/test_premailer.py::Tests::test_ignore_style_elements_with_media_attribute",
"premailer/tests/test_premailer.py::Tests::test_include_star_selector",
"premailer/tests/test_premailer.py::Tests::test_inline_important",
"premailer/tests/test_premailer.py::Tests::test_inline_wins_over_external",
"premailer/tests/test_premailer.py::Tests::test_keyframe_selectors",
"premailer/tests/test_premailer.py::Tests::test_kwargs_html_shortcut_function",
"premailer/tests/test_premailer.py::Tests::test_last_child",
"premailer/tests/test_premailer.py::Tests::test_last_child_exclude_pseudo",
"premailer/tests/test_premailer.py::Tests::test_leftover_important",
"premailer/tests/test_premailer.py::Tests::test_links_without_protocol",
"premailer/tests/test_premailer.py::Tests::test_load_external_url",
"premailer/tests/test_premailer.py::Tests::test_load_external_url_404",
"premailer/tests/test_premailer.py::Tests::test_mailto_url",
"premailer/tests/test_premailer.py::Tests::test_mediaquery",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_basic",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_non_trivial",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_class",
"premailer/tests/test_premailer.py::Tests::test_merge_styles_with_unset",
"premailer/tests/test_premailer.py::Tests::test_mixed_pseudo_selectors",
"premailer/tests/test_premailer.py::Tests::test_multiple_style_elements",
"premailer/tests/test_premailer.py::Tests::test_multithreading",
"premailer/tests/test_premailer.py::Tests::test_parse_style_rules",
"premailer/tests/test_premailer.py::Tests::test_precedence_comparison",
"premailer/tests/test_premailer.py::Tests::test_prefer_inline_to_class",
"premailer/tests/test_premailer.py::Tests::test_pseudo_selectors_without_selector",
"premailer/tests/test_premailer.py::Tests::test_remove_classes",
"premailer/tests/test_premailer.py::Tests::test_remove_unset_properties",
"premailer/tests/test_premailer.py::Tests::test_six_color",
"premailer/tests/test_premailer.py::Tests::test_strip_important",
"premailer/tests/test_premailer.py::Tests::test_style_attribute_specificity",
"premailer/tests/test_premailer.py::Tests::test_style_block_with_external_urls",
"premailer/tests/test_premailer.py::Tests::test_tel_url",
"premailer/tests/test_premailer.py::Tests::test_turnoff_cache_works_as_expected",
"premailer/tests/test_premailer.py::Tests::test_type_test",
"premailer/tests/test_premailer.py::Tests::test_unknown_in_media_queries",
"premailer/tests/test_premailer.py::Tests::test_uppercase_margin",
"premailer/tests/test_premailer.py::Tests::test_xml_cdata"
] |
{
"failed_lite_validators": [
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-02-22 16:57:59+00:00
|
bsd-3-clause
| 4,522
|
|
jonathanj__cacofonix-12
|
diff --git a/src/cacofonix/_app.py b/src/cacofonix/_app.py
index 6b1e7cd..5f867dc 100644
--- a/src/cacofonix/_app.py
+++ b/src/cacofonix/_app.py
@@ -37,7 +37,9 @@ class Application(object):
"""
Parse and validate a fragment from a stream.
"""
- return self.validate_fragment(_yaml.load(fd))
+ fragment = _yaml.load(fd)
+ fragment['issues'] = {str(k): v for k, v in fragment.get('issues', {}).items()}
+ return self.validate_fragment(fragment)
def find_fragments(
self,
@@ -157,12 +159,12 @@ class Application(object):
def compile_fragment_files(
self,
write_fs: FS,
- found_fragments: Iterable[FoundFragment]) -> int:
+ found_fragments: Iterable[FoundFragment]) -> List[str]:
"""
Compile fragment files into `parent_dir`.
"""
- n = 0
- for n, (version_fs, filename) in enumerate(found_fragments, 1):
+ outputs = []
+ for version_fs, filename in found_fragments:
try:
fragment = self.load_fragment(version_fs.readtext(filename))
fragment_type = fragment.get('type')
@@ -186,9 +188,10 @@ class Application(object):
if parent_dir:
write_fs.makedirs(parent_dir, recreate=True)
write_fs.writetext(output_path, rendered_content)
+ outputs.append(output_path)
except Exception:
raise FragmentCompilationError(filename)
- return n
+ return outputs
def render_changelog(
self,
diff --git a/src/cacofonix/_config.py b/src/cacofonix/_config.py
index 46d4528..3c9c3a5 100644
--- a/src/cacofonix/_config.py
+++ b/src/cacofonix/_config.py
@@ -10,11 +10,12 @@ T = TypeVar('T')
default_sections = OrderedDict([('', '')])
default_fragment_types = OrderedDict([
- (u'feature', {'name': u'Added', 'showcontent': True}),
- (u'bugfix', {'name': u'Fixed', 'showcontent': True}),
- (u'doc', {'name': u'Documentation', 'showcontent': True}),
- (u'removal', {'name': u'Removed', 'showcontent': True}),
- (u'misc', {'name': u'Misc', 'showcontent': False}),
+ (u'feature', {'title': u'Added', 'showcontent': True}),
+ (u'change', {'title': u'Changed', 'showcontent': True}),
+ (u'bugfix', {'title': u'Fixed', 'showcontent': True}),
+ (u'doc', {'title': u'Documentation', 'showcontent': True}),
+ (u'removal', {'title': u'Removed', 'showcontent': True}),
+ (u'misc', {'title': u'Misc', 'showcontent': False}),
])
diff --git a/src/cacofonix/main.py b/src/cacofonix/main.py
index 47417d9..84bd8f5 100644
--- a/src/cacofonix/main.py
+++ b/src/cacofonix/main.py
@@ -231,7 +231,7 @@ def compile(app: Application,
new_fragments = list(app.find_new_fragments())
with open_fs('temp://') as tmp_fs:
- n = app.compile_fragment_files(tmp_fs, new_fragments)
+ n = len(app.compile_fragment_files(tmp_fs, new_fragments))
echo('Found {} new changelog fragments'.format(n))
changelog = app.render_changelog(
fs=tmp_fs,
|
jonathanj/cacofonix
|
984d93c51f954266a82a4447a1a6b3cd5644d0f6
|
diff --git a/src/cacofonix/test/data/config.yaml b/src/cacofonix/test/data/config.yaml
new file mode 100644
index 0000000..3570806
--- /dev/null
+++ b/src/cacofonix/test/data/config.yaml
@@ -0,0 +1,10 @@
+# Path in which to find fragments.
+change_fragments_path: fragments
+# Path to the changelog to merge into.
+changelog_path: CHANGELOG.md
+# Marker to add new changes below.
+changelog_marker: |
+ <!-- Generated release notes start. -->
+
+# Type of document to output, valid values are: markdown, rest
+changelog_output_type: markdown
diff --git a/src/cacofonix/test/data/fragments/numeric_issue_number.yaml b/src/cacofonix/test/data/fragments/numeric_issue_number.yaml
new file mode 100644
index 0000000..545e1dc
--- /dev/null
+++ b/src/cacofonix/test/data/fragments/numeric_issue_number.yaml
@@ -0,0 +1,7 @@
+type: bugfix
+section: ''
+issues:
+ 1234: https://example.com/
+feature_flags: []
+description: |-
+ An issue description.
diff --git a/src/cacofonix/test/test_app.py b/src/cacofonix/test/test_app.py
new file mode 100644
index 0000000..83d8f72
--- /dev/null
+++ b/src/cacofonix/test/test_app.py
@@ -0,0 +1,66 @@
+import os.path
+import pytest
+from fs import open_fs
+from fs.base import FS
+from fs.wrap import read_only
+
+from cacofonix._app import Application
+from cacofonix._config import Config
+from cacofonix._effects import SideEffects
+
+
+class MockSideEffects(SideEffects):
+ def __init__(self, root_fs, config):
+ self.root_fs = root_fs
+ self.fragments_fs = self.root_fs.opendir(config.change_fragments_path)
+
+ def archive_fs(self, path: str) -> FS:
+ raise NotImplementedError()
+
+ def changelog_fs(self, path: str) -> FS:
+ raise NotImplementedError()
+
+ def git_mv(self, path: str) -> FS:
+ raise NotImplementedError()
+
+ def git_stage(self, path: str) -> FS:
+ raise NotImplementedError()
+
+
+def open_test_root_fs() -> FS:
+ """
+ Open the filesystem root for the tests.
+ """
+ cwd = os.path.dirname(__file__)
+ return read_only(open_fs('data', cwd=cwd))
+
+
+def load_test_config(root_fs) -> Config:
+ """
+ Load the config files for the tests.
+ """
+ with root_fs.open('config.yaml') as fd:
+ return Config.parse(fd)
+
+
+class TestCompileFragmentFiles:
+ """
+ Tests for `Application.compile_fragment_files`.
+ """
+ def test_numeric_issue_key(self):
+ """
+ Issues with numeric keys can be compiled.
+ """
+ with open_test_root_fs() as root_fs:
+ config = load_test_config(root_fs)
+ effects = MockSideEffects(root_fs, config)
+ app = Application(config, effects)
+ found_fragments = [
+ (effects.fragments_fs, 'numeric_issue_number.yaml'),
+ ]
+ with open_fs('temp://') as write_fs:
+ outputs = app.compile_fragment_files(
+ write_fs,
+ found_fragments)
+ assert len(outputs) == 1
+ assert '#1234' in write_fs.readtext(outputs[0])
|
Coerce issue keys to strings
Towncrier expects issues to be strings and throws an exception if this isn't the case:
```
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/site-packages/cacofonix/_app.py", line 172, in compile_fragment_files
    self.config.changelog_output_type)
  File "/usr/local/lib/python3.7/site-packages/cacofonix/_towncrier.py", line 63, in render_fragment
    for ticket, url in sorted(issues.items())))
  File "/usr/local/lib/python3.7/site-packages/cacofonix/_towncrier.py", line 63, in <genexpr>
    for ticket, url in sorted(issues.items())))
  File "/usr/local/lib/python3.7/site-packages/cacofonix/_towncrier.py", line 35, in _ticket_prefix
    if ticket.isdigit():
AttributeError: 'int' object has no attribute 'isdigit'
```
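The coercion applied by the patch, shown in isolation (YAML parses bare numeric keys as ints, so they are stringified before towncrier sees them):

```python
issues = {1234: "https://example.com/"}          # as loaded from the YAML fragment
issues = {str(k): v for k, v in issues.items()}  # what load_fragment now does
assert list(issues) == ["1234"]
```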
|
0.0
|
984d93c51f954266a82a4447a1a6b3cd5644d0f6
|
[
"src/cacofonix/test/test_app.py::TestCompileFragmentFiles::test_numeric_issue_key"
] |
[] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-28 11:37:25+00:00
|
mit
| 3,328
|
|
mikeckennedy__markdown-subtemplate-8
|
diff --git a/markdown_subtemplate/__init__.py b/markdown_subtemplate/__init__.py
index 82b3d4f..e1fe900 100644
--- a/markdown_subtemplate/__init__.py
+++ b/markdown_subtemplate/__init__.py
@@ -3,7 +3,7 @@ markdown_subtemplate - A template engine to render
Markdown with external template imports and variable replacements.
"""
-__version__ = '0.1.20'
+__version__ = '0.2.21'
__author__ = 'Michael Kennedy <[email protected]>'
__all__ = []
diff --git a/markdown_subtemplate/infrastructure/page.py b/markdown_subtemplate/infrastructure/page.py
index d4e8d94..0f9a67e 100644
--- a/markdown_subtemplate/infrastructure/page.py
+++ b/markdown_subtemplate/infrastructure/page.py
@@ -7,6 +7,7 @@ from markdown_subtemplate.infrastructure import markdown_transformer
from markdown_subtemplate.exceptions import ArgumentExpectedException, TemplateNotFoundException
from markdown_subtemplate import logging as __logging
import markdown_subtemplate.storage as __storage
+from markdown_subtemplate.logging import SubtemplateLogger
from markdown_subtemplate.storage import SubtemplateStorage
@@ -37,11 +38,14 @@ def get_page(template_path: str, data: Dict[str, Any]) -> str:
# Get the markdown with imports and substitutions
markdown = get_markdown(template_path)
+ inline_variables = {}
+ markdown = get_inline_variables(markdown, inline_variables, log)
# Convert markdown to HTML
html = get_html(markdown)
cache.add_html(key, key, html)
- html = process_variables(html, data)
+ full_data = {**data, **inline_variables}
+ html = process_variables(html, full_data)
dt = datetime.datetime.now() - t0
@@ -174,3 +178,42 @@ def process_variables(raw_text: str, data: Dict[str, Any]) -> str:
transformed_text = transformed_text.replace(key_placeholders[key], str(data[key]))
return transformed_text
+
+
+def get_inline_variables(markdown: str, new_vars: Dict[str, str], log: SubtemplateLogger) -> str:
+ lines: List[str] = markdown.split('\n')
+ pattern = '[VARIABLE '
+
+ final_lines = []
+
+ for l in lines:
+
+ if not( l and l.upper().startswith(pattern)):
+ final_lines.append(l)
+ continue
+
+ text = l[len(pattern):].strip("]")
+ parts = text.split('=')
+ if len(parts) != 2:
+ log.error(f"Invalid variable definition in markdown: {l}.")
+ continue
+
+ name = parts[0].strip().upper()
+ value = parts[1].strip()
+ has_quotes = (
+ (value.startswith('"') or value.startswith("'")) and
+ (value.endswith('"') or value.endswith("'"))
+ )
+
+ if not has_quotes:
+ log.error(f"Invalid variable definition in markdown, missing quotes surrounding value: {l}.")
+ continue
+
+ value = value.strip('\'"').strip()
+
+ new_vars[name]=value
+
+ if new_vars:
+ return "\n".join(final_lines)
+ else:
+ return markdown
|
mikeckennedy/markdown-subtemplate
|
29737dbad109ee3d943872751bdb11f64df51431
|
diff --git a/tests/page_tests.py b/tests/page_tests.py
index 419223e..403be3b 100644
--- a/tests/page_tests.py
+++ b/tests/page_tests.py
@@ -2,10 +2,10 @@ import os
import pytest
-from markdown_subtemplate import exceptions
from markdown_subtemplate import engine
-from markdown_subtemplate.storage.file_storage import FileStore
+from markdown_subtemplate import exceptions
from markdown_subtemplate.infrastructure import page
+from markdown_subtemplate.storage.file_storage import FileStore
FileStore.set_template_folder(
os.path.join(os.path.dirname(__file__), 'templates'))
@@ -46,7 +46,6 @@ We have a paragraph with [a link](https://talkpython.fm).
def test_basic_markdown_html():
template = os.path.join('home', 'basic_markdown.md')
html = engine.get_page(template, {'a': 1, 'b': 2})
- print("HTML", html)
text = '''
<h1>This is the basic title</h1>
@@ -147,6 +146,23 @@ And more inline **content**.
assert text == md.strip()
+def test_variable_definition_markdown():
+ template = os.path.join('home', 'variables.md')
+ html = page.get_page(template, {})
+
+ text = '''
+<h1>This page defines a variable.</h1>
+
+<p>We have a paragraph with <a href="https://talkpython.fm">a link</a>.</p>
+
+<h3>This page had a title set: Variables rule!</h3>
+
+<p>And more content with the word TITLE.</p>
+'''.strip()
+
+ assert text == html.strip()
+
+
def test_no_lowercase_replacements_markdown():
template = os.path.join('home', 'replacements_case_error.md')
md = page.get_markdown(template, {'title': 'the title', 'link': 'The link'})
diff --git a/tests/templates/home/variables.md b/tests/templates/home/variables.md
new file mode 100644
index 0000000..afbdf24
--- /dev/null
+++ b/tests/templates/home/variables.md
@@ -0,0 +1,7 @@
+# This page defines a variable.
+
+We have a paragraph with [a link](https://talkpython.fm).
+
+[VARIABLE title="Variables rule!"]
+
+[IMPORT REPLACEMENTS]
|
Allow defining a variable to be subsequently passed to import sections
Here's what I have in mind. We have a landing page and want to specify which campaign it is about, all while reusing most of the content:
```markdown
# The landing page title
Welcome people from this campaign
[VARIABLE name=value]
[IMPORT section1] # {name: value} is added to the variables passed here.
[IMPORT section2]
[IMPORT section3]
```
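A rough sketch of how such `[VARIABLE name="value"]` lines could be collected and stripped before rendering (names and details here are illustrative, not the library's actual code; the real behaviour is defined by the patch above):

```python
from typing import Dict, List, Tuple


def extract_inline_variables(markdown: str) -> Tuple[str, Dict[str, str]]:
    """Illustrative only: pull [VARIABLE name="value"] lines out of markdown."""
    variables: Dict[str, str] = {}
    kept: List[str] = []
    for line in markdown.split("\n"):
        if line.upper().startswith("[VARIABLE "):
            body = line[len("[VARIABLE "):].rstrip("]")
            name, _, value = body.partition("=")
            variables[name.strip().upper()] = value.strip().strip("'\"")
        else:
            kept.append(line)
    return "\n".join(kept), variables


md = '# The landing page title\n[VARIABLE name="value"]\n[IMPORT section1]'
text, found = extract_inline_variables(md)
assert found == {"NAME": "value"}
assert "[VARIABLE" not in text
```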
|
0.0
|
29737dbad109ee3d943872751bdb11f64df51431
|
[
"tests/page_tests.py::test_variable_definition_markdown"
] |
[
"tests/page_tests.py::test_missing_template_by_file",
"tests/page_tests.py::test_missing_template_by_folder",
"tests/page_tests.py::test_empty_template",
"tests/page_tests.py::test_basic_markdown_template",
"tests/page_tests.py::test_basic_markdown_html",
"tests/page_tests.py::test_import_markdown",
"tests/page_tests.py::test_nested_import_markdown",
"tests/page_tests.py::test_variable_replacement_markdown",
"tests/page_tests.py::test_two_imports_markdown",
"tests/page_tests.py::test_no_lowercase_replacements_markdown",
"tests/page_tests.py::test_html_with_replacement",
"tests/page_tests.py::test_html_with_embedded_html",
"tests/page_tests.py::test_missing_import_markdown"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-09-06 22:22:15+00:00
|
mit
| 3,940
|
|
ronaldoussoren__macholib-38
|
diff --git a/macholib/MachO.py b/macholib/MachO.py
index 3db9520..d4d85f1 100644
--- a/macholib/MachO.py
+++ b/macholib/MachO.py
@@ -92,7 +92,10 @@ def lc_str_value(offset, cmd_info):
class MachO(object):
"""
- Provides reading/writing the Mach-O header of a specific existing file
+ Provides reading/writing the Mach-O header of a specific existing file.
+
+ If allow_unknown_load_commands is True, allows unknown load commands.
+ Otherwise, raises ValueError if the file contains an unknown load command.
"""
# filename - the original filename of this mach-o
@@ -104,7 +107,7 @@ class MachO(object):
# low_offset - essentially, the maximum mach-o header size
# id_cmd - the index of my id command, or None
- def __init__(self, filename):
+ def __init__(self, filename, allow_unknown_load_commands=False):
# supports the ObjectGraph protocol
self.graphident = filename
@@ -114,6 +117,7 @@ class MachO(object):
# initialized by load
self.fat = None
self.headers = []
+ self.allow_unknown_load_commands = allow_unknown_load_commands
with open(filename, "rb") as fp:
self.load(fp)
@@ -165,7 +169,7 @@ class MachO(object):
magic, hdr, endian = MH_CIGAM_64, mach_header_64, "<"
else:
raise ValueError("Unknown Mach-O header: 0x%08x in %r" % (header, fh))
- hdr = MachOHeader(self, fh, offset, size, magic, hdr, endian)
+ hdr = MachOHeader(self, fh, offset, size, magic, hdr, endian, self.allow_unknown_load_commands)
self.headers.append(hdr)
def write(self, f):
@@ -175,7 +179,10 @@ class MachO(object):
class MachOHeader(object):
"""
- Provides reading/writing the Mach-O header of a specific existing file
+ Provides reading/writing the Mach-O header of a specific existing file.
+
+ If allow_unknown_load_commands is True, allows unknown load commands.
+ Otherwise, raises ValueError if the file contains an unknown load command.
"""
# filename - the original filename of this mach-o
@@ -187,7 +194,7 @@ class MachOHeader(object):
# low_offset - essentially, the maximum mach-o header size
# id_cmd - the index of my id command, or None
- def __init__(self, parent, fh, offset, size, magic, hdr, endian):
+ def __init__(self, parent, fh, offset, size, magic, hdr, endian, allow_unknown_load_commands=False):
self.MH_MAGIC = magic
self.mach_header = hdr
@@ -206,6 +213,8 @@ class MachOHeader(object):
self.filetype = None
self.headers = []
+ self.allow_unknown_load_commands = allow_unknown_load_commands
+
self.load(fh)
def __repr__(self):
@@ -242,7 +251,16 @@ class MachOHeader(object):
# read the specific command
klass = LC_REGISTRY.get(cmd_load.cmd, None)
if klass is None:
- raise ValueError("Unknown load command: %d" % (cmd_load.cmd,))
+ if not self.allow_unknown_load_commands:
+ raise ValueError("Unknown load command: %d" % (cmd_load.cmd,))
+ # No load command in the registry, so append the load command itself
+ # instead of trying to deserialize the data after the header.
+ data_size = cmd_load.cmdsize - sizeof(load_command)
+ cmd_data = fh.read(data_size)
+ cmd.append((cmd_load, cmd_load, cmd_data))
+ read_bytes += cmd_load.cmdsize
+ continue
+
cmd_cmd = klass.from_fileobj(fh, **kw)
if cmd_load.cmd == LC_ID_DYLIB:
|
ronaldoussoren/macholib
|
53d9c7a4056af795990a8db2af71e17f05f59460
|
diff --git a/macholib_tests/test_MachO.py b/macholib_tests/test_MachO.py
index c32ea70..ed3d808 100644
--- a/macholib_tests/test_MachO.py
+++ b/macholib_tests/test_MachO.py
@@ -1,17 +1,92 @@
+import contextlib
+import os
+import struct
import sys
+import tempfile
+import uuid
-from macholib import MachO
+from macholib import MachO, mach_o
if sys.version_info[:2] <= (2, 6):
import unittest2 as unittest
else:
import unittest
+@contextlib.contextmanager
+def temporary_macho_file(load_commands):
+ struct_mach_header_64_format = '>IIIIIIII'
+ cpu_type_arm64 = 0x100000C
+ cpu_subtype_arm_all = 0x0
+ mh_filetype_execute = 0x2
+ ncmds = len(load_commands)
+ sizeofcmds = sum([len(lc) for lc in load_commands])
+ mach_header = struct.pack(struct_mach_header_64_format, mach_o.MH_MAGIC_64,
+ cpu_type_arm64, cpu_subtype_arm_all,
+ mh_filetype_execute, ncmds, sizeofcmds, 0, 0)
+ with tempfile.NamedTemporaryFile(delete=False) as macho_file:
+ macho_file.write(mach_header)
+ for lc in load_commands:
+ macho_file.write(lc)
+ # Close the file so it can be re-opened on Windows.
+ macho_file.close()
+ yield macho_file.name
+ os.unlink(macho_file.name)
+
+
+def lc_uuid(macho_uuid):
+ lc_uuid_format = '>II16s'
+ lc_uuid_size = struct.calcsize(lc_uuid_format)
+ return struct.pack(lc_uuid_format, mach_o.LC_UUID, lc_uuid_size, macho_uuid.bytes)
+
+
+def lc_unknown():
+ lc_unknown_format = '>III'
+ lc_unknown = 0x707A11ED # Made-up load command. Hopefully never used.
+ lc_unknown_size = struct.calcsize(lc_unknown_format)
+ lc_unknown_value = 42 # Random value
+ return struct.pack(lc_unknown_format, lc_unknown, lc_unknown_size, lc_unknown_value)
+
class TestMachO(unittest.TestCase):
- @unittest.expectedFailure
- def test_missing(self):
- self.fail("tests are missing")
+ def test_known_load_command_should_succeed(self):
+ macho_uuid = uuid.UUID('6894C0AE-C8B7-4E0B-A529-30BBEBA3703B')
+ with temporary_macho_file([lc_uuid(macho_uuid)]) as macho_filename:
+ macho = MachO.MachO(macho_filename, allow_unknown_load_commands=True)
+ self.assertEqual(len(macho.headers), 1)
+ self.assertEqual(len(macho.headers[0].commands), 1)
+ load_command, command, _ = macho.headers[0].commands[0]
+ self.assertEqual(load_command.cmd, mach_o.LC_UUID)
+ self.assertEqual(uuid.UUID(bytes=command.uuid), macho_uuid)
+
+ def test_unknown_load_command_should_fail(self):
+ with temporary_macho_file([lc_unknown()]) as macho_filename:
+ with self.assertRaises(ValueError) as assert_context:
+ MachO.MachO(macho_filename)
+
+ def test_unknown_load_command_should_succeed_with_flag(self):
+ with temporary_macho_file([lc_unknown()]) as macho_filename:
+ macho = MachO.MachO(macho_filename, allow_unknown_load_commands=True)
+ self.assertEqual(len(macho.headers), 1)
+ self.assertEqual(len(macho.headers[0].commands), 1)
+ load_command, command, data = macho.headers[0].commands[0]
+ self.assertEqual(load_command.cmd, 0x707A11ED)
+ self.assertIsInstance(command, mach_o.load_command)
+ self.assertEqual(struct.unpack('>I', data), (42,))
+
+
+ def test_mix_of_known_and_unknown_load_commands_should_allow_unknown_with_flag(self):
+ macho_uuid = uuid.UUID('6894C0AE-C8B7-4E0B-A529-30BBEBA3703B')
+ with temporary_macho_file([lc_unknown(), lc_uuid(macho_uuid)]) as macho_filename:
+ macho = MachO.MachO(macho_filename, allow_unknown_load_commands=True)
+ self.assertEqual(len(macho.headers), 1)
+ self.assertEqual(len(macho.headers[0].commands), 2)
+ load_command, command, data = macho.headers[0].commands[0]
+ self.assertEqual(load_command.cmd, 0x707A11ED)
+ self.assertIsInstance(command, mach_o.load_command)
+ self.assertEqual(struct.unpack('>I', data), (42,))
+ load_command, command, _ = macho.headers[0].commands[1]
+ self.assertEqual(load_command.cmd, mach_o.LC_UUID)
+ self.assertEqual(uuid.UUID(bytes=command.uuid), macho_uuid)
if __name__ == "__main__":
|
Feature: Option for ignoring unknown load commands
**[Original report](https://bitbucket.org/ronaldoussoren/macholib/issue/26) by Mikkel Kamstrup Erlandsen (Bitbucket: [kamikkel](https://bitbucket.org/kamikkel), GitHub: [kamikkel](https://github.com/kamikkel)).**
----------------------------------------
First of all - thanks for a great library! <3
We were hit by the unknown load commands 0x31 and 0x32 that were just recently implemented in macholib. Updating fixed this, of course, but it would be nice to guard against similar issues with an option for the parser to just skip (or print a warning on) unknown load commands.
Background: We use macholib for some simple validation of user-submitted iOS apps. We would like to be able to process newly built apps without redeployment of our toolchain (just recording any warnings it may produce) - disregarding what Apple may come up with in the future.
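Based on the patch above, opting in would look roughly like this (a minimal sketch; the binary path is a placeholder and the unpacking of `commands` follows the diff):

```python
from macholib import MachO

# With the flag, unknown load commands are kept as raw entries
# (load_command, load_command, data) instead of raising ValueError.
macho = MachO.MachO("/path/to/app_binary", allow_unknown_load_commands=True)
for header in macho.headers:
    for load_command, command, _data in header.commands:
        print(hex(load_command.cmd), type(command).__name__)
```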
|
0.0
|
53d9c7a4056af795990a8db2af71e17f05f59460
|
[
"macholib_tests/test_MachO.py::TestMachO::test_known_load_command_should_succeed",
"macholib_tests/test_MachO.py::TestMachO::test_mix_of_known_and_unknown_load_commands_should_allow_unknown_with_flag",
"macholib_tests/test_MachO.py::TestMachO::test_unknown_load_command_should_succeed_with_flag"
] |
[
"macholib_tests/test_MachO.py::TestMachO::test_unknown_load_command_should_fail"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-03-03 20:45:23+00:00
|
mit
| 5,284
|
|
aws-cloudformation__cloudformation-cli-python-plugin-236
|
diff --git a/README.md b/README.md
index cd5ec66..6854fe5 100644
--- a/README.md
+++ b/README.md
@@ -12,6 +12,14 @@ This plugin library helps to provide runtime bindings for the execution of your
[](https://travis-ci.com/aws-cloudformation/cloudformation-cli-python-plugin)
+## Community
+
+Join us on Discord! Connect & interact with CloudFormation developers &
+experts, find channels to discuss and get help for our CLIs, cfn-lint, CloudFormation registry, StackSets,
+Guard and more:
+
+[](https://discord.gg/9zpd7TTRwq)
+
Installation
------------
diff --git a/python/rpdk/python/codegen.py b/python/rpdk/python/codegen.py
index f5c49cf..60174aa 100644
--- a/python/rpdk/python/codegen.py
+++ b/python/rpdk/python/codegen.py
@@ -334,14 +334,24 @@ class Python36LanguagePlugin(LanguagePlugin):
LOG.warning("Starting pip build.")
try:
- completed_proc = subprocess_run( # nosec
- command,
- stdout=PIPE,
- stderr=PIPE,
- cwd=base_path,
- check=True,
- shell=True,
- )
+ # On windows run pip command through the default shell (CMD)
+ if os.name == "nt":
+ completed_proc = subprocess_run( # nosec
+ command,
+ stdout=PIPE,
+ stderr=PIPE,
+ cwd=base_path,
+ check=True,
+ shell=True,
+ )
+ else:
+ completed_proc = subprocess_run( # nosec
+ command,
+ stdout=PIPE,
+ stderr=PIPE,
+ cwd=base_path,
+ check=True,
+ )
LOG.warning("pip build finished.")
except (FileNotFoundError, CalledProcessError) as e:
raise DownstreamError("pip build failed") from e
|
aws-cloudformation/cloudformation-cli-python-plugin
|
1866ad7cd1b3c000cf9ce07ee90421a8788dc766
|
diff --git a/tests/plugin/codegen_test.py b/tests/plugin/codegen_test.py
index a36d04b..aa64261 100644
--- a/tests/plugin/codegen_test.py
+++ b/tests/plugin/codegen_test.py
@@ -416,6 +416,43 @@ def test__build_pip(plugin):
mock_pip.assert_called_once_with(sentinel.base_path)
+def test__build_pip_posix(plugin):
+ patch_os_name = patch("rpdk.python.codegen.os.name", "posix")
+ patch_subproc = patch("rpdk.python.codegen.subprocess_run")
+
+ # Path must be set outside simulated os.name
+ temppath = Path(str(sentinel.base_path))
+ with patch_os_name, patch_subproc as mock_subproc:
+ plugin._pip_build(temppath)
+
+ mock_subproc.assert_called_once_with(
+ plugin._make_pip_command(temppath),
+ stdout=ANY,
+ stderr=ANY,
+ cwd=temppath,
+ check=ANY,
+ )
+
+
+def test__build_pip_windows(plugin):
+ patch_os_name = patch("rpdk.python.codegen.os.name", "nt")
+ patch_subproc = patch("rpdk.python.codegen.subprocess_run")
+
+ # Path must be set outside simulated os.name
+ temppath = Path(str(sentinel.base_path))
+ with patch_os_name, patch_subproc as mock_subproc:
+ plugin._pip_build(temppath)
+
+ mock_subproc.assert_called_once_with(
+ plugin._make_pip_command(temppath),
+ stdout=ANY,
+ stderr=ANY,
+ cwd=temppath,
+ check=ANY,
+ shell=True,
+ )
+
+
def test__build_docker(plugin):
plugin._use_docker = True
|
It looks like the upgrade to 2.1.6 has broken dependency installation in `cfn submit --dry-run`
The pip installation in `cfn submit` appears to log pip usage instructions to stdout instead of actually installing the dependencies.
If we downgrade to 2.1.5 then the dependencies are included in the build dir.
If we run 2.1.6, then the build directory does not contain any of the pip dependencies that we'd expect.
This is consistent if I downgrade and then re-upgrade, but I don't have a public project I can share to demonstrate the behaviour.
Example .rpdk-config:
```json
{
"artifact_type": "HOOK",
"typeName": "XXX::XXX::XXX",
"language": "python37",
"runtime": "python3.7",
"entrypoint": "xxx.handlers.hook",
"testEntrypoint": "xxx.handlers.hook",
"settings": {
"version": false,
"subparser_name": null,
"verbose": 0,
"force": false,
"type_name": null,
"artifact_type": null,
"endpoint_url": null,
"region": null,
"target_schemas": [],
"use_docker": false,
"protocolVersion": "2.0.0"
}
}
```
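A plausible cause, consistent with the patch above: on POSIX, `subprocess.run(command, shell=True)` passes only the first element of a list to the shell, so a `['pip', 'install', ...]` command degenerates into a bare `pip` call that just prints its usage text. A hedged sketch of the per-platform handling the fix introduces:

```python
import os
import subprocess

# Stand-in for the real pip command list built by the plugin.
command = ["python", "-c", "print('installing dependencies')"]

if os.name == "nt":
    # On Windows the list is converted to a command line and run via CMD,
    # so shell=True behaves as expected.
    subprocess.run(command, shell=True, check=True)
else:
    # On POSIX, shell=True with a list would execute only the first element;
    # running the list directly executes the full command as intended.
    subprocess.run(command, check=True)
```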
|
0.0
|
1866ad7cd1b3c000cf9ce07ee90421a8788dc766
|
[
"tests/plugin/codegen_test.py::test__build_pip_posix"
] |
[
"tests/plugin/codegen_test.py::test_validate_no[y-True]",
"tests/plugin/codegen_test.py::test_validate_no[Y-True]",
"tests/plugin/codegen_test.py::test_validate_no[yes-True]",
"tests/plugin/codegen_test.py::test_validate_no[Yes-True]",
"tests/plugin/codegen_test.py::test_validate_no[YES-True]",
"tests/plugin/codegen_test.py::test_validate_no[asdf-True]",
"tests/plugin/codegen_test.py::test_validate_no[no-False]",
"tests/plugin/codegen_test.py::test_validate_no[No-False0]",
"tests/plugin/codegen_test.py::test_validate_no[No-False1]",
"tests/plugin/codegen_test.py::test_validate_no[n-False]",
"tests/plugin/codegen_test.py::test_validate_no[N-False]",
"tests/plugin/codegen_test.py::test__remove_build_artifacts_file_found",
"tests/plugin/codegen_test.py::test__remove_build_artifacts_file_not_found",
"tests/plugin/codegen_test.py::test_initialize_resource",
"tests/plugin/codegen_test.py::test_initialize_hook",
"tests/plugin/codegen_test.py::test_package_resource_pip",
"tests/plugin/codegen_test.py::test__pip_build_executable_not_found",
"tests/plugin/codegen_test.py::test__pip_build_called_process_error",
"tests/plugin/codegen_test.py::test__build_pip",
"tests/plugin/codegen_test.py::test__build_pip_windows",
"tests/plugin/codegen_test.py::test__build_docker",
"tests/plugin/codegen_test.py::test__build_docker_posix",
"tests/plugin/codegen_test.py::test__build_docker_windows",
"tests/plugin/codegen_test.py::test__build_docker_no_euid",
"tests/plugin/codegen_test.py::test__docker_build_good_path",
"tests/plugin/codegen_test.py::test_get_plugin_information",
"tests/plugin/codegen_test.py::test__docker_build_bad_path[<lambda>0]",
"tests/plugin/codegen_test.py::test__docker_build_bad_path[ImageLoadError]",
"tests/plugin/codegen_test.py::test__docker_build_bad_path[<lambda>1]",
"tests/plugin/codegen_test.py::test__docker_build_bad_path[<lambda>2]"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-12-19 18:37:41+00:00
|
apache-2.0
| 1,278
|
|
zopefoundation__persistent-161
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 542e403..81a77a8 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -12,6 +12,10 @@
when setting its ``__class__`` and ``__dict__``. This matches the
behaviour of the C implementation. See `issue 155
<https://github.com/zopefoundation/persistent/issues/155>`_.
+- Fix the CFFI cache implementation (used on CPython when
+ ``PURE_PYTHON=1``) to not print unraisable ``AttributeErrors`` from
+ ``_WeakValueDictionary`` during garbage collection. See `issue 150
+ <https://github.com/zopefoundation/persistent/issues/150>`_.
4.6.4 (2020-03-26)
==================
diff --git a/persistent/picklecache.py b/persistent/picklecache.py
index 43d5067..3dde6c3 100644
--- a/persistent/picklecache.py
+++ b/persistent/picklecache.py
@@ -102,7 +102,13 @@ class _WeakValueDictionary(object):
return self._cast(addr, self._py_object).value
def cleanup_hook(self, cdata):
- oid = self._addr_to_oid.pop(cdata.pobj_id, None)
+ # This is called during GC, possibly at interpreter shutdown
+ # when the __dict__ of this object may have already been cleared.
+ try:
+ addr_to_oid = self._addr_to_oid
+ except AttributeError:
+ return
+ oid = addr_to_oid.pop(cdata.pobj_id, None)
self._data.pop(oid, None)
def __contains__(self, oid):
|
zopefoundation/persistent
|
6b500fc3f517fb566e8d0b85b756ddf49758c8d8
|
diff --git a/persistent/tests/test_picklecache.py b/persistent/tests/test_picklecache.py
index 32811f1..9d6ec25 100644
--- a/persistent/tests/test_picklecache.py
+++ b/persistent/tests/test_picklecache.py
@@ -1102,6 +1102,44 @@ class PythonPickleCacheTests(PickleCacheTestMixin, unittest.TestCase):
self.assertEqual(cache.cache_non_ghost_count, 0)
self.assertEqual(len(cache), 0)
+ def test_interpreter_finalization_ffi_cleanup(self):
+ # When the interpreter is busy garbage collecting old objects
+ # and clearing their __dict__ in random orders, the CFFI cleanup
+ # ``ffi.gc()`` cleanup hooks we use on CPython don't
+ # raise errors.
+ #
+ # Prior to Python 3.8, when ``sys.unraisablehook`` was added,
+ # the only way to know if this test fails is to look for AttributeError
+ # on stderr.
+ #
+ # But wait, it gets worse. Prior to https://foss.heptapod.net/pypy/cffi/-/issues/492
+ # (CFFI > 1.14.5, unreleased at this writing), CFFI ignores
+ # ``sys.unraisablehook``, so even on 3.8 the only way to know
+ # a failure is to watch stderr.
+ #
+ # See https://github.com/zopefoundation/persistent/issues/150
+
+ import sys
+ unraised = []
+ try:
+ old_hook = sys.unraisablehook
+ except AttributeError:
+ pass
+ else: # pragma: no cover
+ sys.unraisablehook = unraised.append
+ self.addCleanup(setattr, sys, 'unraisablehook', old_hook)
+
+ cache = self._makeOne()
+ oid = self._numbered_oid(42)
+ o = cache[oid] = self._makePersist(oid=oid)
+ # Clear the dict, or at least part of it.
+ # This is coupled to ``cleanup_hook``
+ if cache.data.cleanup_hook:
+ del cache.data._addr_to_oid
+ del cache[oid]
+
+ self.assertEqual(unraised, [])
+
@skipIfNoCExtension
class CPickleCacheTests(PickleCacheTestMixin, unittest.TestCase):
@@ -1182,6 +1220,30 @@ class CPickleCacheTests(PickleCacheTestMixin, unittest.TestCase):
self.assertEqual(len(cache), 0)
+class TestWeakValueDictionary(unittest.TestCase):
+
+ def _getTargetClass(self):
+ from persistent.picklecache import _WeakValueDictionary
+ return _WeakValueDictionary
+
+ def _makeOne(self):
+ return self._getTargetClass()()
+
+ @unittest.skipIf(PYPY, "PyPy doesn't have the cleanup_hook")
+ def test_cleanup_hook_gc(self):
+ # A more targeted test than ``test_interpreter_finalization_ffi_cleanup``
+ # See https://github.com/zopefoundation/persistent/issues/150
+ wvd = self._makeOne()
+
+ class cdata(object):
+ o = object()
+ pobj_id = id(o)
+ wvd['key'] = cdata.o
+
+ wvd.__dict__.clear()
+ wvd.cleanup_hook(cdata)
+
+
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
AttributeError: '_WeakValueDictionary' object has no attribute '_addr_to_oid'
Seen during test teardown of a large app using CPython in PURE_PYTHON=1 mode
```
From callback for ffi.gc <cdata 'struct CPersistentRingCFFI_struct *' owning 24 bytes>:
Traceback (most recent call last):
File "//persistent/persistent/picklecache.py", line 105, in cleanup_hook
oid = self._addr_to_oid.pop(cdata.pobj_id, None)
AttributeError: '_WeakValueDictionary' object has no attribute '_addr_to_oid'
```
This is ignored, and the dict is already gone, so there shouldn't be any harm, but it is annoying.
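The patch above makes the cleanup hook tolerate a partially torn-down instance. A self-contained sketch of that defensive pattern (a simplified stand-in, not the full picklecache implementation):

```python
class _WeakValueDictionaryLike:
    """Simplified stand-in mirroring the guarded cleanup_hook from the patch."""

    def __init__(self):
        self._addr_to_oid = {}
        self._data = {}

    def cleanup_hook(self, cdata):
        # During interpreter shutdown __dict__ may already be cleared, so
        # fetch the attribute defensively instead of letting AttributeError
        # escape into the ffi.gc callback.
        try:
            addr_to_oid = self._addr_to_oid
        except AttributeError:
            return
        oid = addr_to_oid.pop(cdata.pobj_id, None)
        self._data.pop(oid, None)


class _CData:
    pobj_id = 1


wvd = _WeakValueDictionaryLike()
wvd.__dict__.clear()        # simulate GC having cleared the instance dict
wvd.cleanup_hook(_CData())  # no AttributeError is raised
```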
|
0.0
|
6b500fc3f517fb566e8d0b85b756ddf49758c8d8
|
[
"persistent/tests/test_picklecache.py::TestWeakValueDictionary::test_cleanup_hook_gc"
] |
[
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___delitem___non_string_oid_raises_TypeError",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___delitem___nonesuch_raises_KeyError",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___delitem___w_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___delitem___w_normal_object",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___delitem___w_persistent_class",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___delitem___w_remaining_object",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___getitem___nonesuch_raises_KeyError",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___duplicate_oid_raises_ValueError",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___duplicate_oid_same_obj",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___mismatch_key_oid",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___non_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___non_string_oid_raises_TypeError",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test___setitem___persistent_class",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_cache_garbage_collection_bytes_also_deactivates_object",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_cache_garbage_collection_bytes_with_cache_size_0",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_cache_raw",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_cache_size",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_cannot_update_mru_while_already_locked",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_class_conforms_to_IPickleCache",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_debug_info_w_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_debug_info_w_normal_object",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_debug_info_w_persistent_class",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_empty",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_full_sweep",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_full_sweep_w_changed",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_full_sweep_w_sticky",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_get_nonesuch_no_default",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_get_nonesuch_w_default",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_incrgc_simple",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_incrgc_w_larger_drain_resistance",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_incrgc_w_smaller_drain_resistance",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_init_with_cacheless_jar",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_instance_conforms_to_IPickleCache",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_interpreter_finalization_ffi_cleanup",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_hit_multiple_mixed",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_hit_multiple_non_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_hit_pclass",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_hit_single_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_hit_single_non_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_miss_multiple",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_miss_single",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_invalidate_persistent_class_calls_p_invalidate",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_lruitems",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_minimize",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_minimize_turns_into_ghosts",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_mru_first",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_mru_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_mru_last",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_mru_nonesuch_raises_KeyError",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_mru_normal",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_mru_was_ghost_now_active",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_non_persistent_object",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_obj_already_has_jar",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_obj_already_has_oid",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_obj_already_in_cache",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_success_already_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_success_not_already_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_w_pclass_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_new_ghost_w_pclass_non_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_reify_hit_multiple_mixed",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_reify_hit_single_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_reify_hit_single_non_ghost",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_reify_miss_multiple",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_reify_miss_single",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_setting_already_cached",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_setting_non_persistent_item",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_setting_without_jar",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_sweep_empty",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_sweep_of_non_deactivating_object",
"persistent/tests/test_picklecache.py::PythonPickleCacheTests::test_update_object_size_estimation_simple",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___delitem___non_string_oid_raises_TypeError",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___delitem___nonesuch_raises_KeyError",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___delitem___w_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___delitem___w_normal_object",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___delitem___w_persistent_class",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___delitem___w_remaining_object",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___getitem___nonesuch_raises_KeyError",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___duplicate_oid_raises_ValueError",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___duplicate_oid_same_obj",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___mismatch_key_oid",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___non_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___non_string_oid_raises_TypeError",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test___setitem___persistent_class",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_cache_garbage_collection_bytes_with_cache_size_0",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_cache_raw",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_cache_size",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_class_conforms_to_IPickleCache",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_debug_info_w_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_debug_info_w_normal_object",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_debug_info_w_persistent_class",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_empty",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_full_sweep",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_get_nonesuch_no_default",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_get_nonesuch_w_default",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_incrgc_simple",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_incrgc_w_larger_drain_resistance",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_incrgc_w_smaller_drain_resistance",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_inst_does_not_conform_to_IExtendedPickleCache",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_instance_conforms_to_IPickleCache",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_invalidate_hit_multiple_non_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_invalidate_hit_single_non_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_invalidate_miss_multiple",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_invalidate_miss_single",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_invalidate_persistent_class_calls_p_invalidate",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_lruitems",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_minimize",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_minimize_turns_into_ghosts",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_non_persistent_object",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_obj_already_has_jar",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_obj_already_has_oid",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_obj_already_in_cache",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_success_already_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_success_not_already_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_w_pclass_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_new_ghost_w_pclass_non_ghost",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_setting_already_cached",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_setting_non_persistent_item",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_setting_without_jar",
"persistent/tests/test_picklecache.py::CPickleCacheTests::test_sweep_empty",
"persistent/tests/test_picklecache.py::test_suite"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-03-10 14:52:25+00:00
|
zpl-2.1
| 6,392
|
|
ofek__pypinfo-109
|
diff --git a/pypinfo/core.py b/pypinfo/core.py
index 1075ea9..41bbd8c 100644
--- a/pypinfo/core.py
+++ b/pypinfo/core.py
@@ -10,7 +10,7 @@ from google.cloud.bigquery.job import QueryJobConfig
from pypinfo.fields import AGGREGATES, Downloads
FROM = 'FROM `the-psf.pypi.file_downloads`'
-DATE_ADD = 'DATE_ADD(CURRENT_TIMESTAMP(), INTERVAL {} DAY)'
+DATE_ADD = 'TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL {} DAY)'
START_TIMESTAMP = 'TIMESTAMP("{} 00:00:00")'
END_TIMESTAMP = 'TIMESTAMP("{} 23:59:59")'
START_DATE = '-31'
|
ofek/pypinfo
|
0007c10256804650b4787a14a32e38e8f9347bbc
|
diff --git a/tests/test_core.py b/tests/test_core.py
index 6965e5d..e873e91 100644
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -88,7 +88,7 @@ def test_format_date_negative_number():
date = core.format_date("-1", dummy_format)
# Assert
- assert date == 'DATE_ADD(CURRENT_TIMESTAMP(), INTERVAL -1 DAY)'
+ assert date == 'TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL -1 DAY)'
def test_format_date_yyy_mm_dd():
|
No matching signature for function DATE_ADD for argument types: TIMESTAMP, INTERVAL INT64 DATE_TIME_PART
Hi,
After installing it following the instructions given in the `README.md`, I tried the simplest call:
```bash
pypinfo requests
```
Unfortunately getting following exception:
```
...
File "/Users/christianr/.venv/sett/lib/python3.9/site-packages/pypinfo/cli.py", line 166, in pypinfo
query_rows = query_job.result(timeout=timeout // 1000)
File "/Users/christianr/.venv/sett/lib/python3.9/site-packages/google/cloud/bigquery/job/query.py", line 1160, in result
super(QueryJob, self).result(retry=retry, timeout=timeout)
File "/Users/christianr/.venv/sett/lib/python3.9/site-packages/google/cloud/bigquery/job/base.py", line 631, in result
return super(_AsyncJob, self).result(timeout=timeout, **kwargs)
File "/Users/christianr/.venv/sett/lib/python3.9/site-packages/google/api_core/future/polling.py", line 134, in result
raise self._exception
google.api_core.exceptions.BadRequest: 400 No matching signature for function DATE_ADD for argument types: TIMESTAMP, INTERVAL INT64 DATE_TIME_PART. Supported signature: DATE_ADD(DATE, INTERVAL INT64 DATE_TIME_PART) at [5:25]
(job ID: 947e6084-e5e6-4bb5-ae12-bb8faad8ec0b)
-----Query Job SQL Follows-----
| . | . | . | . | . | . | . | . | . | . | . | . |
1:SELECT
2: FORMAT_TIMESTAMP("%Y", timestamp) as download_year,
3: COUNT(*) as download_count,
4:FROM `the-psf.pypi.file_downloads`
5:WHERE timestamp BETWEEN DATE_ADD(CURRENT_TIMESTAMP(), INTERVAL -1826 DAY) AND DATE_ADD(CURRENT_TIMESTAMP(), INTERVAL -1 DAY)
6: AND file.project = "blist"
7: AND details.installer.name = "pip"
8:GROUP BY
9: download_year
10:ORDER BY
11: download_count DESC
12:LIMIT 10
| . | . | . | . | . | . | . | . | . | . | . | . |
```
Using the SQL query in the **BigQuery** SQL workspace pops up the same error message if I use the _public PyPI download statistics dataset_.
My environment:
```
pypinfo, version 18.0.0
Python 3.9.1
```
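The root cause is that BigQuery standard SQL only defines `DATE_ADD` for `DATE` arguments; for a `TIMESTAMP` the matching function is `TIMESTAMP_ADD`, which is what the patch switches the query template to. A hedged sketch of building the corrected expression (the helper name is illustrative; pypinfo's own helper is `format_date`):

```python
# Template mirroring the patched constant in pypinfo/core.py.
DATE_ADD = 'TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL {} DAY)'


def format_relative_date(days: str) -> str:
    # A negative offset such as "-1" becomes a TIMESTAMP expression that is
    # valid in a BETWEEN clause against the `timestamp` column.
    return DATE_ADD.format(int(days))


assert format_relative_date("-1") == (
    'TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL -1 DAY)'
)
```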
|
0.0
|
0007c10256804650b4787a14a32e38e8f9347bbc
|
[
"tests/test_core.py::test_format_date_negative_number"
] |
[
"tests/test_core.py::test_tabulate_markdown",
"tests/test_core.py::test_normalize[Pillow-pillow]",
"tests/test_core.py::test_normalize[setuptools_scm-setuptools-scm]",
"tests/test_core.py::test_validate_date_valid[2018-05-15]",
"tests/test_core.py::test_build_query",
"tests/test_core.py::test_normalize_dates_yyy_mm_dd_and_negative_integer",
"tests/test_core.py::test_create_config",
"tests/test_core.py::test_month_yyyy_mm_dd",
"tests/test_core.py::test_month_negative_integer",
"tests/test_core.py::test_normalize_dates_yyy_mm",
"tests/test_core.py::test_add_download_total",
"tests/test_core.py::test_validate_date_valid[-1]",
"tests/test_core.py::test_validate_date_invalid[2018-19-39]",
"tests/test_core.py::test_month_yyyy_mm",
"tests/test_core.py::test_normalize[pypinfo-pypinfo]",
"tests/test_core.py::test_validate_date_invalid[something",
"tests/test_core.py::test_validate_date_invalid[1]",
"tests/test_core.py::test_add_percentages",
"tests/test_core.py::test_format_json",
"tests/test_core.py::test_format_date_yyy_mm_dd",
"tests/test_core.py::test_tabulate_default"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-01-25 19:16:06+00:00
|
mit
| 4,342
|
|
eWaterCycle__grpc4bmi-129
|
diff --git a/grpc4bmi/bmi_client_apptainer.py b/grpc4bmi/bmi_client_apptainer.py
index 100a85b..a4169e2 100644
--- a/grpc4bmi/bmi_client_apptainer.py
+++ b/grpc4bmi/bmi_client_apptainer.py
@@ -8,7 +8,7 @@ from typing import Iterable
from packaging.specifiers import SpecifierSet
from packaging.version import Version
-from typeguard import check_argument_types, qualified_name
+from typeguard import typechecked
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException
@@ -194,13 +194,13 @@ class BmiClientApptainer(BmiClient):
"""
+ @typechecked
def __init__(self, image: str, work_dir: str, input_dirs: Iterable[str] = tuple(), delay=0, timeout=None,
capture_logs=True,
):
- assert check_argument_types()
if type(input_dirs) == str:
msg = f'type of argument "input_dirs" must be collections.abc.Iterable; ' \
- f'got {qualified_name(input_dirs)} instead'
+ f'got {type(input_dirs)} instead'
raise TypeError(msg)
check_apptainer_version()
host = 'localhost'
diff --git a/grpc4bmi/bmi_client_docker.py b/grpc4bmi/bmi_client_docker.py
index d14fe67..328400a 100644
--- a/grpc4bmi/bmi_client_docker.py
+++ b/grpc4bmi/bmi_client_docker.py
@@ -5,7 +5,7 @@ from typing import Iterable
import docker
from docker.models.containers import Container
-from typeguard import check_argument_types, qualified_name
+from typeguard import typechecked
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.exceptions import DeadContainerException
@@ -58,14 +58,14 @@ class BmiClientDocker(BmiClient):
See :py:class:`grpc4bmi.bmi_client_apptainer.BmiClientApptainer` for examples using `input_dirs` and `work_dir`.
"""
+ @typechecked
def __init__(self, image: str, work_dir: str, image_port=50051, host=None,
input_dirs: Iterable[str] = tuple(),
user=os.getuid(), remove=False, delay=5,
timeout=None):
- assert check_argument_types()
if type(input_dirs) == str:
msg = f'type of argument "input_dirs" must be collections.abc.Iterable; ' \
- f'got {qualified_name(input_dirs)} instead'
+ f'got {type(input_dirs)} instead'
raise TypeError(msg)
port = BmiClient.get_unique_port()
client = docker.from_env()
diff --git a/grpc4bmi/bmi_client_singularity.py b/grpc4bmi/bmi_client_singularity.py
index 3a49871..f2f1e4a 100644
--- a/grpc4bmi/bmi_client_singularity.py
+++ b/grpc4bmi/bmi_client_singularity.py
@@ -8,7 +8,7 @@ from typing import Iterable
from packaging.specifiers import SpecifierSet
from packaging.version import Version
-from typeguard import check_argument_types, qualified_name
+from typeguard import typechecked
from grpc4bmi.bmi_grpc_client import BmiClient
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException, SingularityVersionException
@@ -197,13 +197,13 @@ class BmiClientSingularity(BmiClient):
"""
+ @typechecked
def __init__(self, image: str, work_dir: str, input_dirs: Iterable[str] = tuple(), delay=0, timeout=None,
capture_logs=True,
):
- assert check_argument_types()
if type(input_dirs) == str:
msg = f'type of argument "input_dirs" must be collections.abc.Iterable; ' \
- f'got {qualified_name(input_dirs)} instead'
+ f'got {type(input_dirs)} instead'
raise TypeError(msg)
check_singularity_version()
host = 'localhost'
diff --git a/grpc4bmi/bmi_grpc_client.py b/grpc4bmi/bmi_grpc_client.py
index cacfa34..2db4cfe 100644
--- a/grpc4bmi/bmi_grpc_client.py
+++ b/grpc4bmi/bmi_grpc_client.py
@@ -9,7 +9,7 @@ import numpy as np
from bmipy import Bmi
import grpc
import numpy
-from typeguard import check_argument_types
+from typeguard import typechecked
from grpc_status import rpc_status
from google.rpc import error_details_pb2
@@ -94,8 +94,8 @@ class BmiClient(Bmi):
s.bind(("" if host is None else host, 0))
return int(s.getsockname()[1])
+ @typechecked
def initialize(self, filename: Optional[str]):
- assert check_argument_types()
fname = "" if filename is None else filename
try:
return self.stub.initialize(bmi_pb2.InitializeRequest(config_file=fname))
diff --git a/pyproject.toml b/pyproject.toml
index 4d4d9e8..de9cf52 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -28,7 +28,7 @@ dependencies = [
"grpcio-reflection",
"grpcio-status",
"googleapis-common-protos>=1.5.5",
- "protobuf",
+ "protobuf>=4,<5",
"numpy",
"docker",
"bmipy",
|
eWaterCycle/grpc4bmi
|
3798bbc460494783e47c53a8e58be07e89d3f855
|
diff --git a/test/test_apptainer.py b/test/test_apptainer.py
index 8dd570b..cd5ea5e 100644
--- a/test/test_apptainer.py
+++ b/test/test_apptainer.py
@@ -1,7 +1,8 @@
import os
-from typing import Type, Union
+from typing import Type
import pytest
+from typeguard import TypeCheckError
from grpc4bmi.bmi_client_apptainer import SUPPORTED_APPTAINER_VERSIONS, BmiClientApptainer, check_apptainer_version_string
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException
@@ -64,17 +65,17 @@ class TestBmiClientApptainerBadDays:
BmiClientApptainer(image=IMAGE_NAME, input_dirs=(some_dir,), work_dir=some_dir)
def test_workdir_as_number(self):
- with pytest.raises(TypeError, match='must be str'):
+ with pytest.raises(TypeCheckError, match='is not an instance of str'):
BmiClientApptainer(image=IMAGE_NAME, work_dir=42)
def test_inputdirs_as_str(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got str instead'):
+ with pytest.raises(TypeError, match=' must be collections.abc.Iterable'):
BmiClientApptainer(image=IMAGE_NAME, input_dirs='old type', work_dir=some_dir)
def test_inputdirs_as_number(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got int instead'):
+ with pytest.raises(TypeCheckError, match='is not an instance of collections.abc.Iterable'):
BmiClientApptainer(image=IMAGE_NAME, input_dirs=42, work_dir=some_dir)
diff --git a/test/test_client.py b/test/test_client.py
index e1c643a..cf00c3b 100644
--- a/test/test_client.py
+++ b/test/test_client.py
@@ -9,6 +9,7 @@ from google.protobuf import any_pb2
from google.rpc import error_details_pb2, status_pb2, code_pb2
from grpc_status import rpc_status
from heat import BmiHeat
+from typeguard import TypeCheckError
from grpc4bmi.bmi_grpc_server import BmiServer
from grpc4bmi.bmi_grpc_client import BmiClient, RemoteException, handle_error
@@ -108,7 +109,7 @@ def test_initialize():
def test_initialize_with_nonstring():
client, local = make_bmi_classes(False)
assert client is not None
- with pytest.raises(TypeError, match='got int instead'):
+ with pytest.raises(TypeCheckError, match='did not match any element in the union'):
client.initialize(42)
client.finalize()
del client
diff --git a/test/test_docker.py b/test/test_docker.py
index 7b8fc65..812ec21 100644
--- a/test/test_docker.py
+++ b/test/test_docker.py
@@ -3,6 +3,7 @@ from io import BytesIO
import docker
import numpy as np
import pytest
+from typeguard import TypeCheckError
from grpc4bmi.bmi_client_docker import BmiClientDocker
from grpc4bmi.exceptions import DeadContainerException
@@ -102,17 +103,17 @@ class TestBmiClientDocker:
BmiClientDocker(image=walrus_docker_image, image_port=55555, input_dirs=(some_dir,), work_dir=some_dir)
def test_workdir_as_number(self):
- with pytest.raises(TypeError, match='must be str'):
+ with pytest.raises(TypeCheckError, match='is not an instance of str'):
BmiClientDocker(image=walrus_docker_image, work_dir=42)
def test_inputdirs_as_str(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got str instead'):
+ with pytest.raises(TypeError, match='must be collections.abc.Iterable'):
BmiClientDocker(image=walrus_docker_image, input_dirs='old type', work_dir=some_dir)
def test_inputdirs_as_number(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got int instead'):
+ with pytest.raises(TypeCheckError, match='is not an instance of collections.abc.Iterable'):
BmiClientDocker(image=walrus_docker_image, input_dirs=42, work_dir=some_dir)
def test_logs(self, walrus_model, capfd):
diff --git a/test/test_singularity.py b/test/test_singularity.py
index 41e4265..1f9d8b1 100644
--- a/test/test_singularity.py
+++ b/test/test_singularity.py
@@ -10,6 +10,7 @@ from grpc import RpcError
from nbconvert.preprocessors import ExecutePreprocessor
from nbformat.v4 import new_notebook, new_code_cell
import numpy as np
+from typeguard import TypeCheckError
from grpc4bmi.bmi_client_singularity import SUPPORTED_APPTAINER_VERSIONS, SUPPORTED_SINGULARITY_VERSIONS, BmiClientSingularity, check_singularity_version_string
from grpc4bmi.exceptions import ApptainerVersionException, DeadContainerException, SingularityVersionException
@@ -167,17 +168,17 @@ class TestBmiClientSingularity:
assert len(model.get_value('Q', np.zeros(1,))) == 1
def test_workdir_as_number(self):
- with pytest.raises(TypeError, match='must be str'):
+ with pytest.raises(TypeCheckError, match='is not an instance of str'):
BmiClientSingularity(image=IMAGE_NAME, work_dir=42)
def test_inputdirs_as_str(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got str instead'):
+ with pytest.raises(TypeError, match='must be collections.abc.Iterable'):
BmiClientSingularity(image=IMAGE_NAME, input_dirs='old type', work_dir=some_dir)
def test_inputdirs_as_number(self, tmp_path):
some_dir = str(tmp_path)
- with pytest.raises(TypeError, match='must be collections.abc.Iterable; got int instead'):
+ with pytest.raises(TypeCheckError, match='is not an instance of collections.abc.Iterable'):
BmiClientSingularity(image=IMAGE_NAME, input_dirs=42, work_dir=some_dir)
|
Incompatibility with typeguard 3
It seems typeguard 3 was released, which removes functions used in this repo. We should either use the new API or pin typeguard to <3.
```
________________ ERROR collecting tests/forcing/test_default.py ________________
ImportError while importing test module '/home/runner/work/ewatercycle/ewatercycle/tests/forcing/test_default.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
/usr/share/miniconda3/envs/ewatercycle/lib/python3.9/importlib/__init__.py:127: in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
tests/forcing/test_default.py:5: in <module>
    from ewatercycle.forcing import (
src/ewatercycle/forcing/__init__.py:8: in <module>
    from . import _hype, _lisflood, _marrmot, _pcrglobwb, _wflow
src/ewatercycle/forcing/_lisflood.py:18: in <module>
    from ._lisvap import create_lisvap_config, lisvap
src/ewatercycle/forcing/_lisvap.py:29: in <module>
    from ewatercycle.container import ContainerEngine
src/ewatercycle/container.py:7: in <module>
    from grpc4bmi.bmi_client_docker import BmiClientDocker
/usr/share/miniconda3/envs/ewatercycle/lib/python3.9/site-packages/grpc4bmi/bmi_client_docker.py:8: in <module>
    from typeguard import check_argument_types, qualified_name
E   ImportError: cannot import name 'check_argument_types' from 'typeguard' (/usr/share/miniconda3/envs/ewatercycle/lib/python3.9/site-packages/typeguard/__init__.py)
```
Found while running bmi2 branch.
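For context, the migration applied in the patch above replaces the removed typeguard 2 helpers (`check_argument_types`, `qualified_name`) with the decorator API that exists in both major versions. A minimal hedged sketch:

```python
from typeguard import typechecked


@typechecked
def initialize(filename: str) -> None:
    # The decorator checks argument types at call time, replacing the
    # old explicit "assert check_argument_types()" pattern.
    print(f"initializing from {filename}")


initialize("config.yaml")   # passes
try:
    initialize(42)          # typeguard 3 raises TypeCheckError here
except Exception as exc:
    print(type(exc).__name__, exc)
```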
|
0.0
|
3798bbc460494783e47c53a8e58be07e89d3f855
|
[
"test/test_apptainer.py::Test_check_apptainer_version_string::test_ok[apptainer",
"test/test_apptainer.py::Test_check_apptainer_version_string::test_too_old[apptainer",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_workdir_as_number",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_inputdirs_as_str",
"test/test_apptainer.py::TestBmiClientApptainerBadDays::test_inputdirs_as_number",
"test/test_client.py::test_server_start",
"test/test_client.py::test_component_name",
"test/test_client.py::test_input_item_count",
"test/test_client.py::test_output_item_count",
"test/test_client.py::test_input_var_names",
"test/test_client.py::test_output_var_names",
"test/test_client.py::test_initialize",
"test/test_client.py::test_initialize_with_nonstring",
"test/test_client.py::test_update",
"test/test_client.py::test_update_until",
"test/test_client.py::test_get_time_unit",
"test/test_client.py::test_get_time_step",
"test/test_client.py::test_get_current_time",
"test/test_client.py::test_get_updated_time",
"test/test_client.py::test_get_start_end_time",
"test/test_client.py::test_get_var_grid",
"test/test_client.py::test_get_var_type",
"test/test_client.py::test_get_var_units",
"test/test_client.py::test_get_var_nbytes",
"test/test_client.py::test_get_var_location",
"test/test_client.py::test_get_var_value",
"test/test_client.py::test_get_value_ptr",
"test/test_client.py::test_get_vals_indices",
"test/test_client.py::test_set_var_value",
"test/test_client.py::test_set_values_indices",
"test/test_client.py::test_get_grid_size",
"test/test_client.py::test_get_grid_rank",
"test/test_client.py::test_get_grid_type",
"test/test_client.py::test_get_grid_shape",
"test/test_client.py::test_get_grid_spacing",
"test/test_client.py::test_get_grid_origin",
"test/test_client.py::test_method_exception[initialize-client_request0]",
"test/test_client.py::test_method_exception[update-client_request1]",
"test/test_client.py::test_method_exception[update_until-client_request2]",
"test/test_client.py::test_method_exception[finalize-client_request3]",
"test/test_client.py::test_method_exception[get_component_name-client_request4]",
"test/test_client.py::test_method_exception[get_input_item_count-client_request5]",
"test/test_client.py::test_method_exception[get_output_item_count-client_request6]",
"test/test_client.py::test_method_exception[get_input_var_names-client_request7]",
"test/test_client.py::test_method_exception[get_output_var_names-client_request8]",
"test/test_client.py::test_method_exception[get_time_units-client_request9]",
"test/test_client.py::test_method_exception[get_time_step-client_request10]",
"test/test_client.py::test_method_exception[get_current_time-client_request11]",
"test/test_client.py::test_method_exception[get_start_time-client_request12]",
"test/test_client.py::test_method_exception[get_end_time-client_request13]",
"test/test_client.py::test_method_exception[get_var_grid-client_request14]",
"test/test_client.py::test_method_exception[get_var_type-client_request15]",
"test/test_client.py::test_method_exception[get_var_itemsize-client_request16]",
"test/test_client.py::test_method_exception[get_var_units-client_request17]",
"test/test_client.py::test_method_exception[get_var_nbytes-client_request18]",
"test/test_client.py::test_method_exception[get_var_location-client_request19]",
"test/test_client.py::test_method_exception[get_value-client_request20]",
"test/test_client.py::test_method_exception[get_value_at_indices-client_request21]",
"test/test_client.py::test_method_exception[set_value-client_request22]",
"test/test_client.py::test_method_exception[set_value_at_indices-client_request23]",
"test/test_client.py::test_method_exception[get_grid_size-client_request24]",
"test/test_client.py::test_method_exception[get_grid_type-client_request25]",
"test/test_client.py::test_method_exception[get_grid_rank-client_request26]",
"test/test_client.py::test_method_exception[get_grid_x-client_request27]",
"test/test_client.py::test_method_exception[get_grid_y-client_request28]",
"test/test_client.py::test_method_exception[get_grid_z-client_request29]",
"test/test_client.py::test_method_exception[get_grid_shape-client_request30]",
"test/test_client.py::test_method_exception[get_grid_spacing-client_request31]",
"test/test_client.py::test_method_exception[get_grid_origin-client_request32]",
"test/test_client.py::test_method_exception[get_grid_node_count-client_request33]",
"test/test_client.py::test_method_exception[get_grid_edge_count-client_request34]",
"test/test_client.py::test_method_exception[get_grid_face_count-client_request35]",
"test/test_client.py::test_method_exception[get_grid_edge_nodes-client_request36]",
"test/test_client.py::test_method_exception[get_grid_face_nodes-client_request37]",
"test/test_client.py::test_method_exception[get_grid_face_edges-client_request38]",
"test/test_client.py::test_method_exception[get_grid_nodes_per_face-client_request39]",
"test/test_client.py::TestUniRectGridModel::test_grid_type",
"test/test_client.py::TestUniRectGridModel::test_grid_size",
"test/test_client.py::TestUniRectGridModel::test_grid_rank",
"test/test_client.py::TestUniRectGridModel::test_grid_shape",
"test/test_client.py::TestUniRectGridModel::test_grid_origin",
"test/test_client.py::TestUniRectGridModel::test_grid_spacing",
"test/test_client.py::TestRect3DGridModel::test_grid_size",
"test/test_client.py::TestRect3DGridModel::test_grid_rank",
"test/test_client.py::TestRect3DGridModel::test_grid_x",
"test/test_client.py::TestRect3DGridModel::test_grid_y",
"test/test_client.py::TestRect3DGridModel::test_grid_z",
"test/test_client.py::TestRect2DGridModel::test_grid_size",
"test/test_client.py::TestRect2DGridModel::test_grid_rank",
"test/test_client.py::TestRect2DGridModel::test_grid_x",
"test/test_client.py::TestRect2DGridModel::test_grid_y",
"test/test_client.py::TestRect2DGridModel::test_grid_z",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_size",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_rank",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_shape",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_x",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_y",
"test/test_client.py::TestStructured3DQuadrilateralsGridModel::test_grid_z",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_size",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_rank",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_shape",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_x",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_y",
"test/test_client.py::TestStructured2DQuadrilateralsGridModel::test_grid_z",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_shape",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_size",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_rank",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_node_count",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_edge_count",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_face_count",
"test/test_client.py::TestUnstructuredGridBmiModel::test_get_grid_edge_nodes",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_face_nodes",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_face_edges",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_nodes_per_face",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_x",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_y",
"test/test_client.py::TestUnstructuredGridBmiModel::test_grid_z",
"test/test_client.py::TestFloat32Model::test_get_value",
"test/test_client.py::TestFloat32Model::test_get_value_at_indices",
"test/test_client.py::TestFloat32Model::test_set_value",
"test/test_client.py::TestFloat32Model::test_set_value_at_indices",
"test/test_client.py::TestInt32Model::test_get_value",
"test/test_client.py::TestInt32Model::test_get_value_at_indices",
"test/test_client.py::TestInt32Model::test_set_value",
"test/test_client.py::TestInt32Model::test_set_value_at_indices",
"test/test_client.py::TestBooleanModel::test_get_value",
"test/test_client.py::TestBooleanModel::test_get_value_at_indices",
"test/test_client.py::TestBooleanModel::test_set_value",
"test/test_client.py::TestBooleanModel::test_set_value_at_indices",
"test/test_client.py::test_handle_error_with_stacktrace",
"test/test_client.py::test_handle_error_without_stacktrace",
"test/test_client.py::test_handle_error_without_status",
"test/test_client.py::TestModelWithItemSizeZeroAndVarTypeFloat32::test_get_var_itemsize",
"test/test_client.py::TestModelWithItemSizeZeroAndUnknownVarType::test_get_var_itemsize",
"test/test_docker.py::TestBmiClientDocker::test_workdir_as_number",
"test/test_docker.py::TestBmiClientDocker::test_inputdirs_as_str",
"test/test_docker.py::TestBmiClientDocker::test_inputdirs_as_number",
"test/test_singularity.py::TestBmiClientSingularity::test_workdir_as_number",
"test/test_singularity.py::TestBmiClientSingularity::test_inputdirs_as_str",
"test/test_singularity.py::TestBmiClientSingularity::test_inputdirs_as_number",
"test/test_singularity.py::Test_check_singularity_version_string::test_ok[singularity",
"test/test_singularity.py::Test_check_singularity_version_string::test_ok[apptainer",
"test/test_singularity.py::Test_check_singularity_version_string::test_too_old[singularity",
"test/test_singularity.py::Test_check_singularity_version_string::test_too_old[apptainer"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-03-16 07:04:55+00:00
|
apache-2.0
| 2,063
|
|
swaroopch__edn_format-43
|
diff --git a/edn_format/edn_lex.py b/edn_format/edn_lex.py
index ac0a3af..fc2026e 100644
--- a/edn_format/edn_lex.py
+++ b/edn_format/edn_lex.py
@@ -102,7 +102,8 @@ tokens = ('WHITESPACE',
'MAP_START',
'SET_START',
'MAP_OR_SET_END',
- 'TAG')
+ 'TAG',
+ 'DISCARD_TAG')
PARTS = {}
PARTS["non_nums"] = r"\w.*+!\-_?$%&=:#<>@"
@@ -138,7 +139,7 @@ KEYWORD = (":"
"[{all}]+"
")").format(**PARTS)
TAG = (r"\#"
- r"\w"
+ r"[a-zA-Z]" # https://github.com/edn-format/edn/issues/30#issuecomment-8540641
"("
"[{all}]*"
r"\/"
@@ -147,6 +148,8 @@ TAG = (r"\#"
"[{all}]*"
")").format(**PARTS)
+DISCARD_TAG = r"\#\_"
+
t_VECTOR_START = r'\['
t_VECTOR_END = r'\]'
t_LIST_START = r'\('
@@ -228,9 +231,10 @@ def t_COMMENT(t):
pass # ignore
-def t_DISCARD(t):
- r'\#_\S+\b'
- pass # ignore
[email protected](DISCARD_TAG)
+def t_DISCARD_TAG(t):
+ t.value = t.value[1:]
+ return t
@ply.lex.TOKEN(TAG)
diff --git a/edn_format/edn_parse.py b/edn_format/edn_parse.py
index c2be09d..329584e 100644
--- a/edn_format/edn_parse.py
+++ b/edn_format/edn_parse.py
@@ -56,41 +56,21 @@ def p_term_leaf(p):
p[0] = p[1]
-def p_empty_vector(p):
- """vector : VECTOR_START VECTOR_END"""
- p[0] = ImmutableList([])
-
-
def p_vector(p):
"""vector : VECTOR_START expressions VECTOR_END"""
p[0] = ImmutableList(p[2])
-def p_empty_list(p):
- """list : LIST_START LIST_END"""
- p[0] = tuple()
-
-
def p_list(p):
"""list : LIST_START expressions LIST_END"""
p[0] = tuple(p[2])
-def p_empty_set(p):
- """set : SET_START MAP_OR_SET_END"""
- p[0] = frozenset()
-
-
def p_set(p):
"""set : SET_START expressions MAP_OR_SET_END"""
p[0] = frozenset(p[2])
-def p_empty_map(p):
- """map : MAP_START MAP_OR_SET_END"""
- p[0] = ImmutableDict({})
-
-
def p_map(p):
"""map : MAP_START expressions MAP_OR_SET_END"""
terms = p[2]
@@ -100,14 +80,20 @@ def p_map(p):
p[0] = ImmutableDict(dict([terms[i:i + 2] for i in range(0, len(terms), 2)]))
-def p_expressions_expressions_expression(p):
- """expressions : expressions expression"""
- p[0] = p[1] + [p[2]]
+def p_discarded_expressions(p):
+ """discarded_expressions : DISCARD_TAG expression discarded_expressions
+ |"""
+ p[0] = []
+
+def p_expressions_expression_expressions(p):
+ """expressions : expression expressions"""
+ p[0] = [p[1]] + p[2]
-def p_expressions_expression(p):
- """expressions : expression"""
- p[0] = [p[1]]
+
+def p_expressions_empty(p):
+ """expressions : discarded_expressions"""
+ p[0] = []
def p_expression(p):
@@ -119,6 +105,11 @@ def p_expression(p):
p[0] = p[1]
+def p_expression_discard_expression_expression(p):
+ """expression : DISCARD_TAG expression expression"""
+ p[0] = p[3]
+
+
def p_expression_tagged_element(p):
"""expression : TAG expression"""
tag = p[1]
@@ -144,9 +135,13 @@ def p_expression_tagged_element(p):
p[0] = output
+def eof():
+ raise EDNDecodeError('EOF Reached')
+
+
def p_error(p):
if p is None:
- raise EDNDecodeError('EOF Reached')
+ eof()
else:
raise EDNDecodeError(p)
|
swaroopch/edn_format
|
7a3865c6d7ddc1a8d2d8ecb4114f11ed8b96fda8
|
diff --git a/tests.py b/tests.py
index 8f7fcc1..29562d5 100644
--- a/tests.py
+++ b/tests.py
@@ -133,6 +133,12 @@ class EdnTest(unittest.TestCase):
def check_roundtrip(self, data_input, **kw):
self.assertEqual(data_input, loads(dumps(data_input, **kw)))
+ def check_eof(self, data_input, **kw):
+ with self.assertRaises(EDNDecodeError) as ctx:
+ loads(data_input, **kw)
+
+ self.assertEqual('EOF Reached', str(ctx.exception))
+
def test_dump(self):
self.check_roundtrip({1, 2, 3})
self.check_roundtrip({1, 2, 3}, sort_sets=True)
@@ -339,6 +345,57 @@ class EdnTest(unittest.TestCase):
set(seq),
sort_sets=True)
+ def test_discard(self):
+ for expected, edn_data in (
+ ('[x]', '[x #_ z]'),
+ ('[z]', '[#_ x z]'),
+ ('[x z]', '[x #_ y z]'),
+ ('{1 4}', '{1 #_ 2 #_ 3 4}'),
+ ('[1 2]', '[1 #_ [ #_ [ #_ [ #_ [ #_ 42 ] ] ] ] 2 ]'),
+ ('[1 2 11]', '[1 2 #_ #_ #_ #_ 4 5 6 #_ 7 #_ #_ 8 9 10 11]'),
+ ('()', '(#_(((((((1))))))))'),
+ ('[6]', '[#_ #_ #_ #_ #_ 1 2 3 4 5 6]'),
+ ('[4]', '[#_ #_ 1 #_ 2 3 4]'),
+ ('{:a 1}', '{:a #_:b 1}'),
+ ('[42]', '[42 #_ {:a [1 2 3 4] true false 1 #inst "2017"}]'),
+ ('#{1}', '#{1 #_foo}'),
+ ('"#_ foo"', '"#_ foo"'),
+ ('["#" _]', '[\#_]'),
+ ('[_]', '[#_\#_]'),
+ ('[1]', '[1 #_\n\n42]'),
+ ('{}', '{#_ 1}'),
+ ):
+ self.assertEqual(expected, dumps(loads(edn_data)), edn_data)
+
+ def test_discard_syntax_errors(self):
+ for edn_data in ('#_', '#_ #_ 1', '#inst #_ 2017', '[#_]'):
+ with self.assertRaises(EDNDecodeError):
+ loads(edn_data)
+
+ def test_discard_all(self):
+ for edn_data in (
+ '42', '-1', 'nil', 'true', 'false', '"foo"', '\\space', '\\a',
+ ':foo', ':foo/bar', '[]', '{}', '#{}', '()', '(a)', '(a b)',
+ '[a [[[b] c]] 2]', '#inst "2017"',
+ ):
+ self.assertEqual([1], loads('[1 #_ {}]'.format(edn_data)), edn_data)
+ self.assertEqual([1], loads('[#_ {} 1]'.format(edn_data)), edn_data)
+
+ self.check_eof('#_ {}'.format(edn_data))
+
+ for coll in ('[%s]', '(%s)', '{%s}', '#{%s}'):
+ expected = coll % ""
+ edn_data = coll % '#_ {}'.format(edn_data)
+ self.assertEqual(expected, dumps(loads(edn_data)), edn_data)
+
+ def test_chained_discards(self):
+ for expected, edn_data in (
+ ('[]', '[#_ 1 #_ 2 #_ 3]'),
+ ('[]', '[#_ #_ 1 2 #_ 3]'),
+ ('[]', '[#_ #_ #_ 1 2 3]'),
+ ):
+ self.assertEqual(expected, dumps(loads(edn_data)), edn_data)
+
class EdnInstanceTest(unittest.TestCase):
def test_hashing(self):
|
Discard is not handled as expected
`[x #_ y z]` should yield `[Symbol(x), Symbol(z)]`, but parsing instead fails with: "Don't know how to handle tag _"
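For context, a minimal reproduction of the expected behaviour (a sketch assuming the `loads`/`dumps` helpers exercised in the test suite):
```python
from edn_format import loads, dumps

# `#_` discards the form that follows it, so `y` should be dropped entirely
# and only the symbols x and z should remain in the parsed vector.
parsed = loads('[x #_ y z]')
print(dumps(parsed))  # expected: '[x z]'
```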
|
0.0
|
7a3865c6d7ddc1a8d2d8ecb4114f11ed8b96fda8
|
[
"tests.py::EdnTest::test_chained_discards",
"tests.py::EdnTest::test_discard",
"tests.py::EdnTest::test_discard_all",
"tests.py::EdnTest::test_discard_syntax_errors"
] |
[
"tests.py::ConsoleTest::test_dumping",
"tests.py::EdnTest::test_chars",
"tests.py::EdnTest::test_dump",
"tests.py::EdnTest::test_exceptions",
"tests.py::EdnTest::test_keyword_keys",
"tests.py::EdnTest::test_lexer",
"tests.py::EdnTest::test_parser",
"tests.py::EdnTest::test_proper_unicode_escape",
"tests.py::EdnTest::test_round_trip_conversion",
"tests.py::EdnTest::test_round_trip_inst_short",
"tests.py::EdnTest::test_round_trip_same",
"tests.py::EdnTest::test_round_trip_sets",
"tests.py::EdnTest::test_sort_keys",
"tests.py::EdnTest::test_sort_sets",
"tests.py::EdnInstanceTest::test_equality",
"tests.py::EdnInstanceTest::test_hashing",
"tests.py::ImmutableListTest::test_list"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2018-08-04 10:41:01+00:00
|
apache-2.0
| 5,813
|
|
BlueBrain__atlas-densities-61
|
diff --git a/README.rst b/README.rst
index 8b8cc3b..8479266 100644
--- a/README.rst
+++ b/README.rst
@@ -10,8 +10,8 @@ The outcome of this project is a list of volumetric files that provides cell typ
for each voxel of the mouse brain volume. The BBP Cell Atlas is the first model required to
reconstruct BBP circuits of the mouse brain.
-The tools implementation is based on the methods of `Eroe et al. (2018)`_, `Rodarie et al. (2021)`_,
-and `Roussel et al. (2021)`_.
+The tools implementation is based on the methods of `Eroe et al. (2018)`_, `Rodarie et al. (2022)`_,
+and `Roussel et al. (2022)`_.
The source code was originally written by Csaba Eroe, Dimitri Rodarie, Hugo Dictus, Lu Huanxiang,
Wojciech Wajerowicz, Jonathan Lurie, and Yann Roussel.
@@ -53,7 +53,7 @@ Note: Depending on the size and resolution of the atlas, it can happen that some
Reference atlases
-----------------
-Most the pipeline steps rely on the following AIBS reference datasets (see `Rodarie et al. (2021)`_ for more
+Most the pipeline steps rely on the following AIBS reference datasets (see `Rodarie et al. (2022)`_ for more
details on the different versions of these datasets):
* A Nissl volume
@@ -161,7 +161,7 @@ ISH datasets for inhibitory/excitatory neurons
In `Eroe et al. (2018)`_ (i.e., BBP Cell Atlas version 1), the excitatory neurons are distinguished
from the inhibitory neurons using the Nrn1 and GAD67 (or GAD1) genetic marker.
-In `Rodarie et al. (2021)`_ (i.e., BBP Cell Atlas version 2), the authors used parvalbumin (Pvalb),
+In `Rodarie et al. (2022)`_ (i.e., BBP Cell Atlas version 2), the authors used parvalbumin (Pvalb),
somatostatin (SST), vasoactive intestinal peptide (VIP) and gabaergic (GAD1) markers (see also
`fit_average_densities_ccfv2_config.yaml`_).
@@ -207,7 +207,7 @@ The files `glia.nrrd`, `oligodendrocyte.nrrd`, `microglia.nrrd`, `astrocyte.nrrd
Extract literature neuron type densities estimates
--------------------------------------------------
-In `Rodarie et al. (2021)`_, the authors collected density estimates from the literature for
+In `Rodarie et al. (2022)`_, the authors collected density estimates from the literature for
inhibitory neurons. Some estimates are in a format that can not be directly used by the pipeline
(e.g., counts instead of densities). This part of the pipeline integrates the literature values into
csv files, that will be used later on for the fitting.
@@ -216,7 +216,7 @@ Format literature review files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We compile here the cell density estimates related to measurements of `Kim et al. (2017)`_ density
-file (`mmc3.xlsx`_) and `Rodarie et al. (2021)`_ literature
+file (`mmc3.xlsx`_) and `Rodarie et al. (2022)`_ literature
review file (`gaba_papers.xlsx`_) into a single CSV file.
Regions known to be purely excitatory or inhibitory (in terms of neuron composition) are also listed
in a separate CSV file.
@@ -237,6 +237,13 @@ Convert literature measurements into average densities
Compute and save average cell densities based on literature measurements and Cell Atlas data (e.g.,
region volumes).
+WARNING:
+Different versions of the annotation atlas or the hierarchy file might have different sets brain
+regions (see `Rodarie et al. (2022)`_ for more details). The region names used by the literature
+measurements might therefore have no match in these datasets.
+Regions from the measurements that are not in the hierarchy or do not appear in the annotations will
+be ignored. A warning message will display these regions, allowing us to review them.
+
.. code-block:: bash
atlas-densities cell-densities measurements-to-average-densities \
@@ -252,7 +259,7 @@ Fit transfer functions from mean region intensity to neuron density
-------------------------------------------------------------------
We fit here transfer functions that describe the relation between mean ISH expression in regions of
-the mouse brain and literature regional density estimates (see `Rodarie et al. (2021)`_ for more
+the mouse brain and literature regional density estimates (see `Rodarie et al. (2022)`_ for more
details). This step leverages AIBS ISH marker datasets (in their expression form, see also
`fit_average_densities_ccfv2_config.yaml`_) and the previously computed
literature density values.
@@ -281,7 +288,7 @@ Compute inhibitory/excitatory neuron densities
----------------------------------------------
The neuron subtypes are here distinguished from each other using either the pipeline from
-`Eroe et al. (2018)`_ (BBP Cell Atlas version 1) or `Rodarie et al. (2021)`_ (BBP Cell Atlas version
+`Eroe et al. (2018)`_ (BBP Cell Atlas version 1) or `Rodarie et al. (2022)`_ (BBP Cell Atlas version
2).
BBP Cell Atlas version 1
@@ -321,8 +328,8 @@ Compute ME-types densities from a probability map
-------------------------------------------------
Morphological and Electrical type densities of inhibitory neurons in the isocortex can be estimated
-using Roussel et al.'s pipeline. This pipeline produces a mapping from inhibitory neuron molecular
-types (here PV, SST, VIP and GAD67) to ME-types defined in `Markram et al. (2015)`_.
+using `Roussel et al. (2022)`_'s pipeline. This pipeline produces a mapping from inhibitory neuron
+molecular types (here PV, SST, VIP and GAD67) to ME-types defined in `Markram et al. (2015)`_.
The following command creates neuron density nrrd files for the me-types listed in a probability
mapping csv file (see also `mtypes_probability_map_config.yaml`_).
@@ -431,8 +438,8 @@ Copyright © 2022 Blue Brain Project/EPFL
.. _`Eroe et al. (2018)`: https://www.frontiersin.org/articles/10.3389/fninf.2018.00084/full
.. _`Kim et al. (2017)`: https://www.sciencedirect.com/science/article/pii/S0092867417310693
.. _`Markram et al. (2015)`: https://www.cell.com/cell/fulltext/S0092-8674(15)01191-5
-.. _`Rodarie et al. (2021)`: https://www.biorxiv.org/content/10.1101/2021.11.20.469384v2
-.. _`Roussel et al. (2021)`: https://www.biorxiv.org/content/10.1101/2021.11.24.469815v1
+.. _`Rodarie et al. (2022)`: https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1010739
+.. _`Roussel et al. (2022)`: https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1010058
.. _`BBP Cell Atlas`: https://portal.bluebrain.epfl.ch/resources/models/cell-atlas/
.. _cgal-pybind: https://github.com/BlueBrain/cgal-pybind
.. _`DeepAtlas`: https://github.com/BlueBrain/Deep-Atlas
diff --git a/atlas_densities/app/cell_densities.py b/atlas_densities/app/cell_densities.py
index 019145e..f60a87b 100644
--- a/atlas_densities/app/cell_densities.py
+++ b/atlas_densities/app/cell_densities.py
@@ -621,6 +621,12 @@ def compile_measurements(
@app.command()
@common_atlas_options
[email protected](
+ "--region-name",
+ type=str,
+ default="root",
+ help="Name of the root region in the hierarchy",
+)
@click.option(
"--cell-density-path",
type=EXISTING_FILE_PATH,
@@ -654,6 +660,7 @@ def compile_measurements(
def measurements_to_average_densities(
annotation_path,
hierarchy_path,
+ region_name,
cell_density_path,
neuron_density_path,
measurements_path,
@@ -666,6 +673,10 @@ def measurements_to_average_densities(
`neuron_density_path`) are used to compute average cell densities in every AIBS region where
sufficient information is available.
+ Measurements from regions which are not in the provided brain region hierarchy or not in the
+ provided annotation volume will be ignored. A warning with all ignored lines from the
+ measurements file will be displayed.
+
The different cell types (e.g., PV+, SST+, VIP+ or overall inhibitory neurons) and
brain regions under consideration are prescribed by the input measurements.
@@ -709,6 +720,7 @@ def measurements_to_average_densities(
region_map = RegionMap.load_json(hierarchy_path)
L.info("Loading measurements ...")
measurements_df = pd.read_csv(measurements_path)
+
L.info("Measurement to average density: started")
average_cell_densities_df = measurement_to_average_density(
region_map,
@@ -718,6 +730,7 @@ def measurements_to_average_densities(
overall_cell_density.raw,
neuron_density.raw,
measurements_df,
+ region_name,
)
remove_non_density_measurements(average_cell_densities_df)
@@ -735,7 +748,7 @@ def measurements_to_average_densities(
"--region-name",
type=str,
default="root",
- help="Name of the region in the hierarchy",
+ help="Name of the root region in the hierarchy",
)
@click.option(
"--neuron-density-path",
@@ -822,6 +835,10 @@ def fit_average_densities(
`neuron_density_path` is used to compute the average density of inhibitory neurons (a.k.a
gad67+) in every homogenous region of type "inhibitory".
+ Regions from the literature values and homogenous regions which are not in the provided brain
+ region hierarchy or not in the provided annotation volume will be ignored. A warning with all
+ ignored lines from the measurements file will be displayed.
+
Our linear fitting of density values relies on the assumption that the average cell density
(number of cells per mm^3) of a cell type T in a brain region R depends linearly on the
average intensity of a gene marker of T. The conversion factor is a constant which depends only
@@ -932,7 +949,7 @@ def fit_average_densities(
"--region-name",
type=str,
default="root",
- help="Name of the region in the hierarchy",
+ help="Name of the root region in the hierarchy",
)
@click.option(
"--neuron-density-path",
diff --git a/atlas_densities/densities/fitting.py b/atlas_densities/densities/fitting.py
index 1fa8dc6..14428ad 100644
--- a/atlas_densities/densities/fitting.py
+++ b/atlas_densities/densities/fitting.py
@@ -29,6 +29,7 @@ from scipy.optimize import curve_fit
from tqdm import tqdm
from atlas_densities.densities import utils
+from atlas_densities.densities.measurement_to_density import remove_unknown_regions
from atlas_densities.exceptions import AtlasDensitiesError, AtlasDensitiesWarning
if TYPE_CHECKING: # pragma: no cover
@@ -625,6 +626,9 @@ def linear_fitting( # pylint: disable=too-many-arguments
_check_homogenous_regions_sanity(homogenous_regions)
hierarchy_info = utils.get_hierarchy_info(region_map, root=region_name)
+ remove_unknown_regions(average_densities, region_map, annotation, hierarchy_info)
+ remove_unknown_regions(homogenous_regions, region_map, annotation, hierarchy_info)
+
L.info("Creating a data frame from known densities ...")
densities = create_dataframe_from_known_densities(
hierarchy_info["brain_region"].to_list(), average_densities
diff --git a/atlas_densities/densities/measurement_to_density.py b/atlas_densities/densities/measurement_to_density.py
index 49b8777..65170e4 100644
--- a/atlas_densities/densities/measurement_to_density.py
+++ b/atlas_densities/densities/measurement_to_density.py
@@ -11,6 +11,7 @@ more than 40 scientific articles.
Densities are expressed in number of cells per mm^3.
"""
+import warnings
from typing import Set, Tuple, Union
import numpy as np
@@ -20,6 +21,7 @@ from tqdm import tqdm
from voxcell import RegionMap # type: ignore
from atlas_densities.densities.utils import compute_region_volumes, get_hierarchy_info
+from atlas_densities.exceptions import AtlasDensitiesWarning
def get_parent_region(region_name: str, region_map: RegionMap) -> Union[str, None]:
@@ -255,7 +257,59 @@ def cell_count_per_slice_to_density(
measurements[mask_50um] = cell_counts_per_slice
-def measurement_to_average_density(
+def remove_unknown_regions(
+ measurements: "pd.DataFrame",
+ region_map: RegionMap,
+ annotation: AnnotationT,
+ hierarchy_info: "pd.DataFrame",
+):
+ """
+ Drop lines from the measurements dataframe which brain regions are not in the AIBS brain region
+ hierarchy or not in the annotation volume.
+ The data frame `measurements` is modified in place.
+
+ Args:
+ measurements: dataframe whose columns are described in
+ :func:`atlas_densities.app.densities.compile_measurements`.
+ region_map: RegionMap object to navigate the brain regions hierarchy.
+ annotation: int array of shape (W, H, D) holding the annotation of the whole AIBS
+ mouse brain. (The integers W, H and D are the dimensions of the array).
+ hierarchy_info: data frame returned by
+ :func:`atlas_densities.densities.utils.get_hierarchy_info`.
+ """
+ pd.set_option("display.max_colwidth", None)
+ indices_ids = measurements.index[
+ ~measurements["brain_region"].isin(hierarchy_info["brain_region"])
+ ]
+ if len(indices_ids) > 0:
+ warnings.warn(
+ "The following lines in the measurements dataframe have no equivalent in the "
+ "brain region hierarchy: \n"
+ f"{measurements.loc[indices_ids, 'brain_region'].to_string()}",
+ AtlasDensitiesWarning,
+ )
+ measurements.drop(indices_ids, inplace=True)
+
+ u_regions = np.unique(annotation)
+ u_regions = np.delete(u_regions, 0) # don't take 0, i.e: outside of the brain
+ u_regions = [
+ region_map.get(u_region, "name", with_ascendants=True)
+ for u_region in u_regions
+ if region_map.find(u_region, "id")
+ ]
+ u_regions = np.unique([elem for row in u_regions for elem in row]) # flatten
+
+ indices_ann = measurements.index[~measurements["brain_region"].isin(u_regions)]
+ if len(indices_ann) > 0:
+ warnings.warn(
+ "The following lines in the measurements dataframe have no equivalent in the "
+ f"annotation volume: \n{measurements.loc[indices_ann, 'brain_region'].to_string()}",
+ AtlasDensitiesWarning,
+ )
+ measurements.drop(indices_ann, inplace=True)
+
+
+def measurement_to_average_density( # pylint: disable=too-many-arguments
region_map: RegionMap,
annotation: AnnotationT,
voxel_dimensions: Tuple[float, float, float],
@@ -263,6 +317,7 @@ def measurement_to_average_density(
cell_density: FloatArray,
neuron_density: FloatArray,
measurements: "pd.DataFrame",
+ root_region: str = "Basic cell groups and regions",
) -> "pd.DataFrame":
"""
Compute average cell densities in AIBS brain regions based on experimental `measurements`.
@@ -274,9 +329,6 @@ def measurement_to_average_density(
(or if several cell density computations are possible from measurements of different
articles), the output cell density of the region is the average of the possible cell densities.
- The region names in `measurements` which are not compliant with the AIBS nomenclature (1.json)
- are ignored.
-
Args:
region_map: RegionMap object to navigate the brain regions hierarchy.
annotation: int array of shape (W, H, D) holding the annotation of the whole AIBS
@@ -291,6 +343,7 @@ def measurement_to_average_density(
in that voxel expressed in number of neurons per mm^3.
measurements: dataframe whose columns are described in
:func:`atlas_densities.app.densities.compile_measurements`.
+ root_region: name of the root region in the brain region hierarchy.
Returns:
dataframe of the same format as `measurements` but where all measurements of type
@@ -298,10 +351,8 @@ def measurement_to_average_density(
type "cell density". Densities are expressed in number of cells per mm^3.
"""
- # Filter out non-AIBS compliant region names
- hierarchy_info = get_hierarchy_info(region_map)
- indices = measurements.index[~measurements["brain_region"].isin(hierarchy_info["brain_region"])]
- measurements = measurements.drop(indices)
+ hierarchy_info = get_hierarchy_info(region_map, root_region)
+ remove_unknown_regions(measurements, region_map, annotation, hierarchy_info)
# Replace NaN standard deviations by measurement values
nan_mask = measurements["standard_deviation"].isna()
diff --git a/atlas_densities/version.py b/atlas_densities/version.py
index 907cbe3..90cc3fc 100644
--- a/atlas_densities/version.py
+++ b/atlas_densities/version.py
@@ -1,4 +1,4 @@
"""version"""
-from pkg_resources import get_distribution # type: ignore
+import importlib.metadata
-VERSION = get_distribution("atlas_densities").version
+VERSION = importlib.metadata.version("atlas_densities")
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 885f797..4014bef 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,8 +11,7 @@
import os
import sys
-from pkg_resources import get_distribution
-
+import importlib.metadata
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -52,7 +51,7 @@ project = 'atlas-densities'
# built documents.
#
# The short X.Y version.
-version = get_distribution(project).version
+version = importlib.metadata.version(project)
# The full version, including alpha/beta/rc tags.
release = version
|
BlueBrain/atlas-densities
|
67d273c11bcb85dd7a5132dbaf879f88effba112
|
diff --git a/tests/app/test_cell_densities.py b/tests/app/test_cell_densities.py
index ddc894d..df2d5dc 100644
--- a/tests/app/test_cell_densities.py
+++ b/tests/app/test_cell_densities.py
@@ -271,6 +271,8 @@ def _get_measurements_to_average_densities_result(runner, hierarchy_path, measur
hierarchy_path,
"--annotation-path",
"annotation.nrrd",
+ "--region-name",
+ "Basic cell groups and regions",
"--cell-density-path",
"cell_density.nrrd",
"--neuron-density-path",
diff --git a/tests/densities/test_measurement_to_density.py b/tests/densities/test_measurement_to_density.py
index 2c6bdb8..440d918 100644
--- a/tests/densities/test_measurement_to_density.py
+++ b/tests/densities/test_measurement_to_density.py
@@ -20,6 +20,11 @@ def region_map():
return RegionMap.from_dict(get_hierarchy())
[email protected]
+def annotations():
+ return np.array([[[0, 10710, 10710, 10711, 10711, 0]]], dtype=int)
+
+
@pytest.fixture
def cell_densities():
densities = np.array([5.0 / 9.0, 4.0 / 8.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0])
@@ -55,6 +60,37 @@ def volumes(voxel_volume=2):
)
+def test_remove_unknown_regions(region_map, annotations):
+ measurements = pd.DataFrame(
+ {
+ "brain_region": [
+ "Lobule Ii",
+ "Lobule II, granular layer",
+ "Lobule II, molecular layer",
+ ],
+ "measurement": [0.722, 28118.0, 31047],
+ "standard_deviation": [0.722, 6753.9, 5312],
+ "measurement_type": ["volume", "cell count", "cell count"],
+ "measurement_unit": ["mm^3", "number of cells", "number of cells"],
+ "source_title": ["Article 1", "Article 2", "Article 1"],
+ }
+ )
+ tested.remove_unknown_regions(measurements, region_map, annotations, get_hierarchy_info())
+ expected = pd.DataFrame(
+ {
+ "brain_region": [
+ "Lobule II, molecular layer",
+ ],
+ "measurement": [31047.0],
+ "standard_deviation": [5312.0],
+ "measurement_type": ["cell count"],
+ "measurement_unit": ["number of cells"],
+ "source_title": ["Article 1"],
+ }
+ )
+ pdt.assert_frame_equal(measurements.reset_index(drop=True), expected)
+
+
def test_cell_count_to_density(region_map, volumes):
measurements = pd.DataFrame(
{
|
Exception in fitting due to regions present in the JSON hierarchy but not in the annotation volume
On the master branch, when running the cell atlas pipeline with the CCFv3 annotation atlas, the following command:
```
atlas-densities cell-densities fit-average-densities \
--hierarchy-path=data/1.json \
--annotation-path=data/ccfv3/annotation_25.nrrd \
--neuron-density-path=data/ccfv3/density_volumes/neuron_density.nrrd \
--average-densities-path=data/ccfv3/measurements/lit_densities.csv \
--homogenous-regions-path=data/ccfv3/measurements/homogeneous_regions.csv \
--gene-config-path=atlas_densities/app/data/markers/fit_average_densities_ccfv2_config.yaml \
--fitted-densities-output-path=data/ccfv3/first_estimates/first_estimates.csv \
--fitting-maps-output-path=data/ccfv3/first_estimates/fitting.json
```
This fails with the following exception:
```
Traceback (most recent call last):
File "Workspace/venv/bin/atlas-densities", line 8, in <module>
sys.exit(cli())
File "Workspace/abt/atlas-densities/atlas_densities/app/cli.py", line 24, in cli
app()
File "Workspace/venv/lib/python3.10/site-packages/click/core.py", line 1130, in __call__
return self.main(*args, **kwargs)
File "Workspace/venv/lib/python3.10/site-packages/click/core.py", line 1055, in main
rv = self.invoke(ctx)
File "Workspace/venv/lib/python3.10/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "Workspace/venv/lib/python3.10/site-packages/click/core.py", line 1657, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "Workspace/venv/lib/python3.10/site-packages/click/core.py", line 1404, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "Workspace/venv/lib/python3.10/site-packages/click/core.py", line 760, in invoke
return __callback(*args, **kwargs)
File "Workspace/venv/lib/python3.10/site-packages/atlas_commons/app_utils.py", line 47, in wrapper
function(*args, **kw)
File "Workspace/abt/atlas-densities/atlas_densities/app/cell_densities.py", line 906, in fit_average_densities
fitted_densities_df, fitting_maps = linear_fitting(
File "Workspace/abt/atlas-densities/atlas_densities/densities/fitting.py", line 624, in linear_fitting
_check_average_densities_sanity(average_densities)
File "Workspace/abt/atlas-densities/atlas_densities/densities/fitting.py", line 514, in _check_average_densities_sanity
raise AtlasDensitiesError(
atlas_densities.exceptions.AtlasDensitiesError: `average_densities` has a NaN measurement or a NaN standard deviation for the following entries: brain_region ... specimen_age
3050 Entorhinal area, lateral part, layer 2a ... 8- to 14-week-old
3051 Entorhinal area, lateral part, layer 2b ... 8- to 14-week-old
```
These lines are part of `lit_densities.csv` and contain no mean values. They were generated when creating `lit_densities.csv` from `gaba_papers.xlsx` (via the `atlas-densities cell-densities measurements-to-average-densities` command).
The regions associated with these lines exist in the brain region hierarchy but do not appear in the CCFv3 annotation atlas:
```python
import numpy as np
from voxcell import RegionMap, VoxelData
region_map = RegionMap.load_json("data/1.json")
annotation = VoxelData.load_nrrd("data/ccfv3/annotation_25.nrrd").raw
# Same for the other layer.
region_map.find("Entorhinal area, lateral part, layer 2a", "name") # {715}
region_map.is_leaf_id(715) # True
np.count_nonzero(annotation==715) # 0
```
Since these regions have no volume, every literature value based on them ends up stored as NaN in the final `lit_densities.csv`.
The question is when and how we should solve this issue:
- When preparing the literature data? Literature values for regions that do not appear in the annotations should not be stored in the literature file (test against the set of unique ids from the annotations); a sketch of this option follows below.
- When writing the literature file? Lines with NaNs should not be stored.
- When reading the literature file during fitting? Lines with NaNs should be ignored.
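A rough sketch of the first option, filtering measurements against the regions actually present in the annotation volume (it reuses the objects from the snippet above; the `brain_region` column name follows the measurements CSV format, and where exactly this filtering would hook into the pipeline is an assumption):
```python
import numpy as np
import pandas as pd
from voxcell import RegionMap, VoxelData

region_map = RegionMap.load_json("data/1.json")
annotation = VoxelData.load_nrrd("data/ccfv3/annotation_25.nrrd").raw

# Collect the names of every region present in the annotation volume,
# together with their ascendants in the hierarchy.
present_ids = np.unique(annotation)
present_ids = present_ids[present_ids != 0]  # 0 marks voxels outside the brain
present_names = set()
for region_id in present_ids:
    if not region_map.find(int(region_id), "id"):
        continue  # id present in the volume but missing from the hierarchy
    present_names.update(region_map.get(int(region_id), "name", with_ascendants=True))

# Drop measurement lines whose region never appears in the annotations.
measurements = pd.read_csv("data/ccfv3/measurements/lit_densities.csv")
measurements = measurements[measurements["brain_region"].isin(present_names)]
```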
|
0.0
|
67d273c11bcb85dd7a5132dbaf879f88effba112
|
[
"tests/app/test_cell_densities.py::test_measurements_to_average_densities",
"tests/densities/test_measurement_to_density.py::test_remove_unknown_regions"
] |
[
"tests/app/test_cell_densities.py::test_cell_density",
"tests/app/test_cell_densities.py::test_glia_cell_densities",
"tests/app/test_cell_densities.py::test_inhibitory_and_excitatory_neuron_densities",
"tests/app/test_cell_densities.py::test_compile_measurements",
"tests/app/test_cell_densities.py::test_fit_average_densities",
"tests/app/test_cell_densities.py::test_zero_negative_values",
"tests/densities/test_measurement_to_density.py::test_get_hierarchy_info",
"tests/densities/test_measurement_to_density.py::test_get_parent_region",
"tests/densities/test_measurement_to_density.py::test_cell_count_to_density",
"tests/densities/test_measurement_to_density.py::test_cell_proportion_to_density",
"tests/densities/test_measurement_to_density.py::test_get_average_voxel_count_per_slice",
"tests/densities/test_measurement_to_density.py::test_cell_count_per_slice_to_density",
"tests/densities/test_measurement_to_density.py::test_measurement_to_average_density",
"tests/densities/test_measurement_to_density.py::test_remove_non_density_measurements"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-06 18:19:39+00:00
|
apache-2.0
| 115
|
|
executablebooks__mdit-py-plugins-46
|
diff --git a/mdit_py_plugins/dollarmath/index.py b/mdit_py_plugins/dollarmath/index.py
index 2663617..e2a4fed 100644
--- a/mdit_py_plugins/dollarmath/index.py
+++ b/mdit_py_plugins/dollarmath/index.py
@@ -13,6 +13,7 @@ def dollarmath_plugin(
allow_labels: bool = True,
allow_space: bool = True,
allow_digits: bool = True,
+ allow_blank_lines: bool = True,
double_inline: bool = False,
label_normalizer: Optional[Callable[[str], str]] = None,
renderer: Optional[Callable[[str, Dict[str, Any]], str]] = None,
@@ -30,6 +31,10 @@ def dollarmath_plugin(
:param allow_digits: Parse inline math when there is a digit
before/after the opening/closing ``$``, e.g. ``1$`` or ``$2``.
This is useful when also using currency.
+ :param allow_blank_lines: Allow blank lines inside ``$$``. Note that blank lines are
+ not allowed in LaTeX, executablebooks/markdown-it-dollarmath, or the Github or
+ StackExchange markdown dialects. Hoever, they have special semantics if used
+ within Sphinx `..math` admonitions, so are allowed for backwards-compatibility.
:param double_inline: Search for double-dollar math within inline contexts
:param label_normalizer: Function to normalize the label,
by default replaces whitespace with `-`
@@ -47,7 +52,9 @@ def dollarmath_plugin(
math_inline_dollar(allow_space, allow_digits, double_inline),
)
md.block.ruler.before(
- "fence", "math_block", math_block_dollar(allow_labels, label_normalizer)
+ "fence",
+ "math_block",
+ math_block_dollar(allow_labels, label_normalizer, allow_blank_lines),
)
# TODO the current render rules are really just for testing
@@ -246,6 +253,7 @@ DOLLAR_EQNO_REV = re.compile(r"^\s*\)([^)$\r\n]+?)\(\s*\${2}")
def math_block_dollar(
allow_labels: bool = True,
label_normalizer: Optional[Callable[[str], str]] = None,
+ allow_blank_lines: bool = False,
) -> Callable[[StateBlock, int, int, bool], bool]:
"""Generate block dollar rule."""
@@ -299,15 +307,14 @@ def math_block_dollar(
start = state.bMarks[nextLine] + state.tShift[nextLine]
end = state.eMarks[nextLine]
- if end - start < 2:
- continue
-
lineText = state.src[start:end]
if lineText.strip().endswith("$$"):
haveEndMarker = True
end = end - 2 - (len(lineText) - len(lineText.strip()))
break
+ if lineText.strip() == "" and not allow_blank_lines:
+ break # blank lines are not allowed within $$
# reverse the line and match
if allow_labels:
|
executablebooks/mdit-py-plugins
|
be145875645970545ccbd409e0221f85a0b4d329
|
diff --git a/tests/fixtures/dollar_math.md b/tests/fixtures/dollar_math.md
index a1c2d43..c6554ec 100644
--- a/tests/fixtures/dollar_math.md
+++ b/tests/fixtures/dollar_math.md
@@ -226,6 +226,18 @@ b = 2
</div>
.
+display equation with blank lines. (valid=False)
+.
+$$
+1+1=2
+
+$$
+.
+<p>$$
+1+1=2</p>
+<p>$$</p>
+.
+
equation followed by a labelled equation (valid=True)
.
$$
diff --git a/tests/test_dollarmath.py b/tests/test_dollarmath.py
index 62f52ef..6ae0345 100644
--- a/tests/test_dollarmath.py
+++ b/tests/test_dollarmath.py
@@ -92,7 +92,11 @@ def test_custom_renderer(data_regression):
)
def test_dollarmath_fixtures(line, title, input, expected):
md = MarkdownIt("commonmark").use(
- dollarmath_plugin, allow_space=False, allow_digits=False, double_inline=True
+ dollarmath_plugin,
+ allow_space=False,
+ allow_digits=False,
+ double_inline=True,
+ allow_blank_lines=False,
)
md.options.xhtmlOut = False
text = md.render(input)
|
Do not allow blank lines within $$
See https://github.com/executablebooks/markdown-it-dollarmath/pull/8
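A minimal sketch of how the option could be exercised (assuming an `allow_blank_lines` flag on the plugin and the usual `mdit_py_plugins.dollarmath` import path):
```python
from markdown_it import MarkdownIt
from mdit_py_plugins.dollarmath import dollarmath_plugin

# With blank lines disallowed, an empty line inside $$ ... $$ terminates the
# math block, and the content falls back to ordinary paragraphs.
md = MarkdownIt("commonmark").use(dollarmath_plugin, allow_blank_lines=False)
print(md.render("$$\n1+1=2\n\n$$\n"))
```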
|
0.0
|
be145875645970545ccbd409e0221f85a0b4d329
|
[
"tests/test_dollarmath.py::test_dollarmath_fixtures[1-single",
"tests/test_dollarmath.py::test_dollarmath_fixtures[8-double-dollar-$$\\n-<p>$$</p>\\n]",
"tests/test_dollarmath.py::test_dollarmath_fixtures[15-single",
"tests/test_dollarmath.py::test_dollarmath_fixtures[22-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[29-simple",
"tests/test_dollarmath.py::test_dollarmath_fixtures[36-simple",
"tests/test_dollarmath.py::test_dollarmath_fixtures[43-equation",
"tests/test_dollarmath.py::test_dollarmath_fixtures[50-use",
"tests/test_dollarmath.py::test_dollarmath_fixtures[57-use",
"tests/test_dollarmath.py::test_dollarmath_fixtures[64-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[71-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[78-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[85-exponentiation",
"tests/test_dollarmath.py::test_dollarmath_fixtures[92-conjugate",
"tests/test_dollarmath.py::test_dollarmath_fixtures[99-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[108-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[118-single",
"tests/test_dollarmath.py::test_dollarmath_fixtures[127-display",
"tests/test_dollarmath.py::test_dollarmath_fixtures[136-display",
"tests/test_dollarmath.py::test_dollarmath_fixtures[146-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[158-underline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[168-after",
"tests/test_dollarmath.py::test_dollarmath_fixtures[179-following",
"tests/test_dollarmath.py::test_dollarmath_fixtures[186-consecutive",
"tests/test_dollarmath.py::test_dollarmath_fixtures[193-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[200-display",
"tests/test_dollarmath.py::test_dollarmath_fixtures[211-multiple",
"tests/test_dollarmath.py::test_dollarmath_fixtures[229-display",
"tests/test_dollarmath.py::test_dollarmath_fixtures[241-equation",
"tests/test_dollarmath.py::test_dollarmath_fixtures[260-multiline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[275-vector",
"tests/test_dollarmath.py::test_dollarmath_fixtures[288-display",
"tests/test_dollarmath.py::test_dollarmath_fixtures[298-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[305-equation",
"tests/test_dollarmath.py::test_dollarmath_fixtures[319-numbered",
"tests/test_dollarmath.py::test_dollarmath_fixtures[334-Equations",
"tests/test_dollarmath.py::test_dollarmath_fixtures[350-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[357-Sum",
"tests/test_dollarmath.py::test_dollarmath_fixtures[366-Sum",
"tests/test_dollarmath.py::test_dollarmath_fixtures[376-equation",
"tests/test_dollarmath.py::test_dollarmath_fixtures[390-inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[403-display",
"tests/test_dollarmath.py::test_dollarmath_fixtures[421-mixed",
"tests/test_dollarmath.py::test_dollarmath_fixtures[441-dollar",
"tests/test_dollarmath.py::test_dollarmath_fixtures[450-empty",
"tests/test_dollarmath.py::test_dollarmath_fixtures[460-or",
"tests/test_dollarmath.py::test_dollarmath_fixtures[471-new",
"tests/test_dollarmath.py::test_dollarmath_fixtures[480-math-escaping:",
"tests/test_dollarmath.py::test_dollarmath_fixtures[487-math-escaping:",
"tests/test_dollarmath.py::test_dollarmath_fixtures[494-math-escaping:",
"tests/test_dollarmath.py::test_dollarmath_fixtures[501-math-escaping:",
"tests/test_dollarmath.py::test_dollarmath_fixtures[508-math-escaping:",
"tests/test_dollarmath.py::test_dollarmath_fixtures[515-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[522-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[529-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[536-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[543-Inline",
"tests/test_dollarmath.py::test_dollarmath_fixtures[556-display"
] |
[
"tests/test_dollarmath.py::test_inline_func",
"tests/test_dollarmath.py::test_block_func",
"tests/test_dollarmath.py::test_plugin_parse",
"tests/test_dollarmath.py::test_custom_renderer"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-06-29 14:35:35+00:00
|
mit
| 2,210
|
|
leonardt__ast_tools-80
|
diff --git a/ast_tools/passes/ssa.py b/ast_tools/passes/ssa.py
index 01d90fa..b37a642 100644
--- a/ast_tools/passes/ssa.py
+++ b/ast_tools/passes/ssa.py
@@ -394,9 +394,7 @@ class SSATransformer(NodeTrackingTransformer):
# Need to visit params to get them to be rebuilt and therfore
# tracked to build the symbol table
- self._skip += 1
update_params = updated_node.params.visit(self)
- self._skip -= 1
assert not self._skip
assert not self._assigned_names, self._assigned_names
new_body = updated_node.body.visit(self)
@@ -531,6 +529,16 @@ class SSATransformer(NodeTrackingTransformer):
def leave_Arg_keyword(self, node: cst.Arg):
self._skip -= 1
+ def visit_Parameters(self, node: cst.Parameters) -> tp.Optional[bool]:
+ self._skip += 1
+ return True
+
+ def leave_Parameters(self,
+ original_node: cst.Parameters,
+ updated_node: cst.Parameters) -> cst.Parameters:
+ self._skip -= 1
+ return updated_node
+
def leave_Name(self,
original_node: cst.Name,
updated_node: cst.Name) -> cst.Name:
|
leonardt/ast_tools
|
cfa081e3f24bf23dbe84cd50ecea8a320027de75
|
diff --git a/tests/test_ssa.py b/tests/test_ssa.py
index 4c35f7e..56786c4 100644
--- a/tests/test_ssa.py
+++ b/tests/test_ssa.py
@@ -36,6 +36,7 @@ def _do_ssa(func, strict, **kwargs):
func = dec(func)
return func
+
@pytest.mark.parametrize('strict', [True, False])
@pytest.mark.parametrize('a', template_options)
@pytest.mark.parametrize('b', template_options)
@@ -457,3 +458,20 @@ def test_call_in_annotations(strict, x, y):
f1 = exec_def_in_file(tree, env)
f2 = apply_passes([ssa(strict)])(f1)
+
[email protected]('strict', [True, False])
+def test_issue_79(strict):
+ class Wrapper:
+ def __init__(self, val):
+ self.val = val
+ def apply(self, f):
+ return f(self.val)
+
+ def f1(x):
+ return x.apply(lambda x: x+1)
+
+ f2 = apply_passes([ssa(strict)])(f1)
+
+ for _ in range(8):
+ x = Wrapper(random.randint(0, 1<<10))
+ assert f1(x) == f2(x)
|
Regression in fault test suite
I'm not sure how important this is, but with the latest release the following example, taken from fault, no longer works with the SSA pass:
```python
import magma as m
import fault
class MWrapperMeta(m.MagmaProtocolMeta):
def __getitem__(cls, T):
assert cls is MWrapper
return type(cls)(f'MWrapper[{T}]', (cls,), {'_T_': T})
def _to_magma_(cls):
return cls._T_
def _qualify_magma_(cls, d):
return MWrapper[cls._T_.qualify(d)]
def _flip_magma_(cls):
return MWrapper[cls._T_.flip()]
def _from_magma_value_(cls, value):
return cls(value)
class MWrapper(m.MagmaProtocol, metaclass=MWrapperMeta):
def __init__(self, val):
if not isinstance(val, type(self)._T_):
raise TypeError()
self._value_ = val
def _get_magma_value_(self):
return self._value_
def apply(self, f):
return f(self._value_)
WrappedBits8 = MWrapper[m.UInt[8]]
@m.sequential2()
class Foo:
def __call__(self, val: WrappedBits8) -> m.UInt[8]:
return val.apply(lambda x: x + 1)
def test_proto():
tester = fault.Tester(Foo)
tester.circuit.val = 1
tester.eval()
tester.circuit.O.expect(2)
tester.compile_and_run("verilator", flags=['-Wno-unused'])
```
It produces this error
```python
../ast_tools/ast_tools/passes/ssa.py:514: in leave_Assign
assert not self._assigned_names, (to_module(original_node).code, self._assigned_names)
E AssertionError: ('__0_return_0 = val.apply(lambda x: x + 1)', ['x_0'])
```
Upon investigation, it seems the SSA pass treats the `lambda x: x + 1` expression as containing an assignment (perhaps because the lambda parameter `x` is treated as a "store"). Adding support for lambdas (not sure whether it worked before or this case was simply missed) seems doable: we need to treat the parameter as a store but remove the assigned names after processing the lambda, so that the new name is used for the parameters while anything coming from the outer scope keeps its correct SSA name. Not sure if this is something we need though, so just documenting it here.
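A framework-free reduction of the same failure (a sketch mirroring the shape of the example above; it assumes `apply_passes` and `ssa` are importable from `ast_tools.passes`):
```python
from ast_tools.passes import apply_passes, ssa

class Wrapper:
    def __init__(self, val):
        self.val = val

    def apply(self, f):
        return f(self.val)

def f1(x):
    # the lambda parameter `x` shadows the outer argument, which is what
    # trips up the SSA pass
    return x.apply(lambda x: x + 1)

# Raised the AssertionError shown above before the fix
# (the failure occurred in both strict and non-strict modes).
f2 = apply_passes([ssa(False)])(f1)
assert f1(Wrapper(3)) == f2(Wrapper(3))
```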
CC @cdonovick
|
0.0
|
cfa081e3f24bf23dbe84cd50ecea8a320027de75
|
[
"tests/test_ssa.py::test_issue_79[True]",
"tests/test_ssa.py::test_issue_79[False]"
] |
[
"tests/test_ssa.py::test_basic_if[r",
"tests/test_ssa.py::test_basic_if[return-r",
"tests/test_ssa.py::test_basic_if[return-return-True]",
"tests/test_ssa.py::test_basic_if[return-return-False]",
"tests/test_ssa.py::test_nested[r",
"tests/test_ssa.py::test_nested[return-r",
"tests/test_ssa.py::test_nested[return-return-r",
"tests/test_ssa.py::test_nested[return-return-return-r",
"tests/test_ssa.py::test_nested[return-return-return-return-True]",
"tests/test_ssa.py::test_nested[return-return-return-return-False]",
"tests/test_ssa.py::test_imbalanced[r",
"tests/test_ssa.py::test_imbalanced[return-r",
"tests/test_ssa.py::test_imbalanced[return-return-r",
"tests/test_ssa.py::test_imbalanced[return-return-0-r",
"tests/test_ssa.py::test_imbalanced[return-return-0-0-True]",
"tests/test_ssa.py::test_imbalanced[return-return-0-0-False]",
"tests/test_ssa.py::test_reassign_arg",
"tests/test_ssa.py::test_double_nested_function_call",
"tests/test_ssa.py::test_attrs_basic[True]",
"tests/test_ssa.py::test_attrs_basic[False]",
"tests/test_ssa.py::test_attrs_returns[True]",
"tests/test_ssa.py::test_attrs_returns[False]",
"tests/test_ssa.py::test_attrs_class[True]",
"tests/test_ssa.py::test_attrs_class[False]",
"tests/test_ssa.py::test_attrs_class_methods[True]",
"tests/test_ssa.py::test_attrs_class_methods[False]",
"tests/test_ssa.py::test_nstrict",
"tests/test_ssa.py::test_attr",
"tests/test_ssa.py::test_call",
"tests/test_ssa.py::test_call_in_annotations[--True]",
"tests/test_ssa.py::test_call_in_annotations[--False]",
"tests/test_ssa.py::test_call_in_annotations[-int-True]",
"tests/test_ssa.py::test_call_in_annotations[-int-False]",
"tests/test_ssa.py::test_call_in_annotations[-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[-ident(x=int)-False]",
"tests/test_ssa.py::test_call_in_annotations[int--True]",
"tests/test_ssa.py::test_call_in_annotations[int--False]",
"tests/test_ssa.py::test_call_in_annotations[int-int-True]",
"tests/test_ssa.py::test_call_in_annotations[int-int-False]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[int-ident(x=int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)--True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)--False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-int-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-int-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(int)-ident(x=int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)--True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)--False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-int-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-int-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(int)-False]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(x=int)-True]",
"tests/test_ssa.py::test_call_in_annotations[ident(x=int)-ident(x=int)-False]"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2021-06-22 01:07:05+00:00
|
apache-2.0
| 3,545
|
|
smartcar__python-sdk-82
|
diff --git a/.releaserc.js b/.releaserc.js
new file mode 100644
index 0000000..5c86c28
--- /dev/null
+++ b/.releaserc.js
@@ -0,0 +1,31 @@
+'use strict';
+
+module.exports = {
+ branches: 'master',
+ plugins: [
+ '@semantic-release/commit-analyzer',
+ [
+ '@google/semantic-release-replace-plugin',
+ {
+ replacements: [
+ {
+ files: ['smartcar/__init__.py'],
+ from: "__version__ = 'semantic-release'",
+ to: "__version__ = '${nextRelease.version}'",
+ results: [
+ {
+ file: 'smartcar/__init__.py',
+ hasChanged: true,
+ numMatches: 1,
+ numReplacements: 1,
+ },
+ ],
+ countMatches: true,
+ },
+ ],
+ },
+ ],
+ '@semantic-release/release-notes-generator',
+ '@semantic-release/github',
+ ],
+};
diff --git a/.travis.yml b/.travis.yml
index 0f47f2e..5d22b00 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,8 +8,7 @@ addons:
apt:
packages: firefox-geckodriver
-language:
- - python
+language: python
python:
- '2.7'
@@ -30,33 +29,21 @@ script:
jobs:
include:
- - stage: tag
- language: generic
- # no install necessary for generic language
- install: true
- script:
- - git config --global user.email "[email protected]"
- - git config --global user.name "Travis CI User"
- - export tag=$(cat smartcar/__init__.py | grep '^__version__' | sed "s/^__version__[[:blank:]]*=[[:blank:]]'\(.*\)'/\1/g")
- - if [ "$TRAVIS_BRANCH" = "master" ]; then git tag -a v$tag -m "Travis Generated Tag"; fi
- deploy:
- provider: script
- skip_cleanup: true
- script: echo -e "machine github.com\n login $CI_USER_TOKEN" >> ~/.netrc && git push origin v$tag
- on:
- branch: master
-
- stage: publish
- language: python
- python:
- - '3.8'
- # use 'true' to noop the install and script stageswhich are required for
- # the python language
- install: true
- script: true
+ python: '3.8'
+ services: []
+ addons:
+ firefox: 'skip'
+ apt: []
+ install:
+ - nvm install 14
+ - npm install [email protected] @google/[email protected]
+ script:
+ - npx semantic-release
+ - head -1 smartcar/__init__.py
deploy:
provider: pypi
- user: $PYPI_USERNAME
+ username: $PYPI_USERNAME
password: $PYPI_PASSWORD
on:
branch: master
diff --git a/smartcar/__init__.py b/smartcar/__init__.py
index 8f9d826..1ce06b1 100644
--- a/smartcar/__init__.py
+++ b/smartcar/__init__.py
@@ -1,4 +1,4 @@
-__version__ = '4.3.3'
+__version__ = 'semantic-release'
from .smartcar import (AuthClient, is_expired, get_user_id, get_vehicle_ids)
from .vehicle import Vehicle
|
smartcar/python-sdk
|
c4ff88a82df60f198742d8e06069ec33d4fc7900
|
diff --git a/tests/test_requester.py b/tests/test_requester.py
index fe629f4..81bea65 100644
--- a/tests/test_requester.py
+++ b/tests/test_requester.py
@@ -28,7 +28,7 @@ class TestRequester(unittest.TestCase):
smartcar.requester.call('GET', self.URL)
self.assertRegexpMatches(
responses.calls[0].request.headers['User-Agent'],
- r'^Smartcar\/(\d+\.\d+\.\d+) \((\w+); (\w+)\) Python v(\d+\.\d+\.\d+)$')
+ r'^Smartcar\/semantic-release \((\w+); (\w+)\) Python v(\d+\.\d+\.\d+)$')
@responses.activate
def test_oauth_error(self):
|
Changelog
Please keep a changelog summarizing user-facing issues so upgrades are easy to understand: https://keepachangelog.com/en/1.0.0/
Thanks :)
|
0.0
|
c4ff88a82df60f198742d8e06069ec33d4fc7900
|
[
"tests/test_requester.py::TestRequester::test_user_agent"
] |
[
"tests/test_requester.py::TestRequester::test_400",
"tests/test_requester.py::TestRequester::test_401",
"tests/test_requester.py::TestRequester::test_403",
"tests/test_requester.py::TestRequester::test_404",
"tests/test_requester.py::TestRequester::test_409",
"tests/test_requester.py::TestRequester::test_429",
"tests/test_requester.py::TestRequester::test_430",
"tests/test_requester.py::TestRequester::test_500",
"tests/test_requester.py::TestRequester::test_504",
"tests/test_requester.py::TestRequester::test_oauth_error",
"tests/test_requester.py::TestRequester::test_other",
"tests/test_requester.py::TestRequester::test_smartcar_not_capable_error",
"tests/test_requester.py::TestRequester::test_unknown_error",
"tests/test_requester.py::TestRequester::test_vehicle_not_capable_error"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-10-02 00:57:05+00:00
|
mit
| 5,564
|
|
maxfischer2781__cms_perf-6
|
diff --git a/cms_perf/cli.py b/cms_perf/cli.py
index 5f504eb..3dc73e6 100644
--- a/cms_perf/cli.py
+++ b/cms_perf/cli.py
@@ -1,6 +1,7 @@
import argparse
from . import xrd_load
+from . import net_load
from . import __version__ as lib_version
@@ -68,6 +69,27 @@ CLI_PAG = CLI.add_subparsers(
title="pag plugins", description="Sensor to use for the pag measurement",
)
+# pag System Plugins
+CLI_PAG_NUMSOCK = CLI_PAG.add_parser(
+ "pag=num_sockets", help="Total sockets across all processes",
+)
+CLI_PAG_NUMSOCK.add_argument(
+ "--max-sockets",
+ default=1000,
+ help="Maximum total sockets considered 100%%",
+ type=int,
+)
+CLI_PAG_NUMSOCK.add_argument(
+ "--socket-kind",
+ help="Which sockets to count",
+ choices=list(net_load.ConnectionKind.__members__),
+ default="tcp",
+)
+CLI_PAG_NUMSOCK.set_defaults(
+ __make_pag__=lambda args: net_load.prepare_num_sockets(
+ net_load.ConnectionKind.__getitem__(args.socket_kind), args.max_sockets
+ )
+)
# pag XRootD Plugins
CLI_PAG_XIOWAIT = CLI_PAG.add_parser(
diff --git a/cms_perf/net_load.py b/cms_perf/net_load.py
new file mode 100644
index 0000000..ebf8003
--- /dev/null
+++ b/cms_perf/net_load.py
@@ -0,0 +1,28 @@
+"""
+Sensor for network load
+"""
+import enum
+
+import psutil
+
+
+class ConnectionKind(enum.Enum):
+ inet = enum.auto()
+ inet4 = enum.auto()
+ inet6 = enum.auto()
+ tcp = enum.auto()
+ tcp4 = enum.auto()
+ tcp6 = enum.auto()
+ udp = enum.auto()
+ udp4 = enum.auto()
+ udp6 = enum.auto()
+ unix = enum.auto()
+ all = enum.auto()
+
+
+def prepare_num_sockets(kind: ConnectionKind, max_sockets):
+ return lambda: 100.0 * num_sockets(kind) / max_sockets
+
+
+def num_sockets(kind: ConnectionKind) -> float:
+ return len(psutil.net_connections(kind=kind.name))
diff --git a/cms_perf/xrd_load.py b/cms_perf/xrd_load.py
index 0c4813f..46c4b18 100644
--- a/cms_perf/xrd_load.py
+++ b/cms_perf/xrd_load.py
@@ -13,17 +13,17 @@ def rescan(interval):
def prepare_iowait(interval: float):
tracker = XrootdTracker(rescan_interval=rescan(interval))
- return tracker.io_wait
+ return lambda: 100.0 * tracker.io_wait()
def prepare_numfds(interval: float, max_core_fds: float):
tracker = XrootdTracker(rescan_interval=rescan(interval))
- return lambda: tracker.num_fds() / max_core_fds / psutil.cpu_count()
+ return lambda: 100.0 * tracker.num_fds() / max_core_fds / psutil.cpu_count()
def prepare_threads(interval: float, max_core_threads: float):
tracker = XrootdTracker(rescan_interval=rescan(interval))
- return lambda: tracker.num_threads() / max_core_threads / psutil.cpu_count()
+ return lambda: 100.0 * tracker.num_threads() / max_core_threads / psutil.cpu_count()
def is_alive(proc: psutil.Process) -> bool:
|
maxfischer2781/cms_perf
|
f1ee7a28ea9d6af3a96965763c656fc4b8aa46ce
|
diff --git a/tests/test_cli.py b/tests/test_cli.py
index a9e4617..ace0296 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -37,7 +37,7 @@ def test_run_sched(executable: List[str]):
assert total == readings[0]
-PAG_PLUGINS = ["xrootd.io_wait", "xrootd.num_fds", "xrootd.num_threads"]
+PAG_PLUGINS = ["num_sockets", "xrootd.io_wait", "xrootd.num_fds", "xrootd.num_threads"]
@mimicry.skipif_unsuported
|
Feature: Provide connection load sensor
Add a sensor for the number of open connections. This is an indication of the number of requests the system must handle, and possibly of whether connections are stalling.
See [``psutil.net_connections``](https://psutil.readthedocs.io/en/latest/#psutil.net_connections) for a potential backend.
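For illustration, a minimal sketch of such a sensor using `psutil.net_connections` (the `max_sockets` normalisation value and the function name are assumptions, not part of the request):
```
# Illustrative sketch only; max_sockets and the function name are assumptions.
import psutil

def socket_load(kind: str = "tcp", max_sockets: int = 1000) -> float:
    """Report open sockets of the given kind as a percentage of an assumed maximum."""
    open_sockets = len(psutil.net_connections(kind=kind))
    return 100.0 * open_sockets / max_sockets

if __name__ == "__main__":
    print(socket_load())
```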
|
0.0
|
f1ee7a28ea9d6af3a96965763c656fc4b8aa46ce
|
[
"tests/test_cli.py::test_run_pag_plugin[num_sockets-executable0]",
"tests/test_cli.py::test_run_pag_plugin[num_sockets-executable1]"
] |
[
"tests/test_cli.py::test_run_normal[executable0]",
"tests/test_cli.py::test_run_normal[executable1]",
"tests/test_cli.py::test_run_sched[executable0]",
"tests/test_cli.py::test_run_sched[executable1]",
"tests/test_cli.py::test_run_pag_plugin[xrootd.io_wait-executable0]",
"tests/test_cli.py::test_run_pag_plugin[xrootd.io_wait-executable1]",
"tests/test_cli.py::test_run_pag_plugin[xrootd.num_fds-executable0]",
"tests/test_cli.py::test_run_pag_plugin[xrootd.num_fds-executable1]",
"tests/test_cli.py::test_run_pag_plugin[xrootd.num_threads-executable0]",
"tests/test_cli.py::test_run_pag_plugin[xrootd.num_threads-executable1]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-07-22 12:01:56+00:00
|
mit
| 3,833
|
|
mfuentesg__SyncSettings-151
|
diff --git a/.gitignore b/.gitignore
index 8babfb9..2c1bd69 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,5 @@ tests/options.json
.idea
cover
.coverage
+
+pyproject.toml
diff --git a/sync_settings/commands/download.py b/sync_settings/commands/download.py
index d0f2985..5a00a8e 100644
--- a/sync_settings/commands/download.py
+++ b/sync_settings/commands/download.py
@@ -1,6 +1,5 @@
# -*- coding: utf-8 -*-
-import json
import os
import sublime
import sublime_plugin
@@ -50,7 +49,7 @@ class SyncSettingsDownloadCommand(sublime_plugin.WindowCommand):
file_content = manager.get_content(
path.join(self.temp_folder, path.encode('Package Control.sublime-settings'))
)
- package_settings = json.loads('{}' if file_content == '' else file_content)
+ package_settings = sublime.decode_value('{}' if file_content == '' else file_content)
# read installed_packages from remote reference and merge it with the local version
local_settings = sublime.load_settings('Package Control.sublime-settings')
setting = 'installed_packages'
diff --git a/sync_settings/libs/gist.py b/sync_settings/libs/gist.py
index a6cdc70..ec6a5d2 100644
--- a/sync_settings/libs/gist.py
+++ b/sync_settings/libs/gist.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
-import json
+import sublime
import re
import requests
from functools import wraps
@@ -63,14 +63,14 @@ class Gist:
def create(self, data):
if not isinstance(data, dict) or not len(data):
raise ValueError('Gist can`t be created without data')
- return self.__do_request('post', self.make_uri(), data=json.dumps(data)).json()
+ return self.__do_request('post', self.make_uri(), data=sublime.encode_value(data, True)).json()
@auth
@with_gid
def update(self, gid, data):
if not isinstance(data, dict) or not len(data):
raise ValueError('Gist can`t be updated without data')
- return self.__do_request('patch', self.make_uri(gid), data=json.dumps(data)).json()
+ return self.__do_request('patch', self.make_uri(gid), data=sublime.encode_value(data, True)).json()
@auth
@with_gid
diff --git a/sync_settings/sync_version.py b/sync_settings/sync_version.py
index 2f28eda..7e37570 100644
--- a/sync_settings/sync_version.py
+++ b/sync_settings/sync_version.py
@@ -1,7 +1,6 @@
# -*- coding: utf-8 -*
import sublime
-import json
import os
from .libs.gist import Gist
from .libs import settings, path
@@ -14,7 +13,7 @@ def get_local_version():
return {}
try:
with open(file_path) as f:
- return json.load(f)
+ return sublime.decode_value(f.read())
except: # noqa: E722
pass
return {}
@@ -37,7 +36,7 @@ def get_remote_version():
def update_config_file(info):
with open(file_path, 'w') as f:
- json.dump(info, f)
+ f.write(sublime.encode_value(info, True))
def show_update_dialog(on_yes=None):
|
mfuentesg/SyncSettings
|
5c7fd2595b3c4fe8672d33bc9c6043f4b192613d
|
diff --git a/tests/mocks/sublime_mock.py b/tests/mocks/sublime_mock.py
index c7d5c25..d22a014 100644
--- a/tests/mocks/sublime_mock.py
+++ b/tests/mocks/sublime_mock.py
@@ -1,3 +1,6 @@
+import json
+import re
+
DIALOG_YES = 1
@@ -29,3 +32,12 @@ def load_settings(*args):
'included_files': [],
'excluded_files': []
})
+
+
+def encode_value(data, pretty):
+ return json.dumps(data)
+
+
+def decode_value(content):
+ decoded = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", content)
+ return json.loads(re.sub(re.compile(r"//.*?\n"), "", decoded))
diff --git a/tests/test_sync_version.py b/tests/test_sync_version.py
index 7a4930d..e518ce8 100644
--- a/tests/test_sync_version.py
+++ b/tests/test_sync_version.py
@@ -42,6 +42,17 @@ class TestSyncVersion(unittest.TestCase):
v = version.get_local_version()
self.assertDictEqual({'hash': '123123123', 'created_at': '2019-01-11T02:15:15Z'}, v)
+ @mock.patch('sync_settings.libs.path.exists', mock.MagicMock(return_value=True))
+ @mock.patch(
+ 'sync_settings.sync_version.open',
+ mock.mock_open(
+ read_data='{"created_at": "2019-01-11T02:15:15Z", /* some comment */"hash": "123123123"}'
+ ),
+ )
+ def test_get_local_version_with_commented_content(self):
+ v = version.get_local_version()
+ self.assertDictEqual({"hash": "123123123", "created_at": "2019-01-11T02:15:15Z"}, v)
+
@mock.patch('sublime.yes_no_cancel_dialog', mock.MagicMock(return_value=1))
def test_show_update_dialog(self):
def on_done():
|
No JSON object could be decoded
Hey! I've run into a problem.
It may be related to the new version of Sublime Text (build 4070 alpha).

```
WARNING:Sync Settings.sync_settings.libs.logger:{'message': 'Not Found', 'documentation_url': 'https://developer.github.com/v3/gists/#delete-a-gist'}
ERROR:Sync Settings.sync_settings.libs.logger:No JSON object could be decoded
Traceback (most recent call last):
File "./python3.3/json/decoder.py", line 367, in raw_decode
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\krupi\AppData\Roaming\Sublime Text 3\Installed Packages\Sync Settings.sublime-package\sync_settings/commands/download.py", line 53, in download
package_settings = json.loads('{}' if file_content == '' else file_content)
File "./python3.3/json/__init__.py", line 316, in loads
File "./python3.3/json/decoder.py", line 351, in decode
File "./python3.3/json/decoder.py", line 369, in raw_decode
ValueError: No JSON object could be decoded
ERROR:Sync Settings.sync_settings.libs.logger:No JSON object could be decoded
Traceback (most recent call last):
File "./python3.3/json/decoder.py", line 367, in raw_decode
StopIteration
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\krupi\AppData\Roaming\Sublime Text 3\Installed Packages\Sync Settings.sublime-package\sync_settings/commands/download.py", line 53, in download
package_settings = json.loads('{}' if file_content == '' else file_content)
File "./python3.3/json/__init__.py", line 316, in loads
File "./python3.3/json/decoder.py", line 351, in decode
File "./python3.3/json/decoder.py", line 369, in raw_decode
ValueError: No JSON object could be decoded
```
My gist
https://gist.github.com/krupitskas/b272fea836faffa356c8f0cce9a121b3
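For context, Sublime settings files may contain comments, which the standard `json` module rejects; a rough workaround sketch (illustrative only, assuming comments never appear inside string values such as URLs) could strip them before decoding:
```
# Rough workaround sketch, not the plugin's actual fix.
# Assumes // and /* */ never occur inside string values (e.g. URLs).
import json
import re

def decode_commented_json(content: str) -> dict:
    without_block = re.sub(r"/\*.*?\*/", "", content, flags=re.DOTALL)
    without_line = re.sub(r"//.*", "", without_block)
    return json.loads(without_line.strip() or "{}")

print(decode_commented_json('{"a": 1, /* comment */ "b": 2}'))  # {'a': 1, 'b': 2}
```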
|
0.0
|
5c7fd2595b3c4fe8672d33bc9c6043f4b192613d
|
[
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_with_commented_content"
] |
[
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_empty_json",
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_invalid_content",
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_no_file",
"tests/test_sync_version.py::TestSyncVersion::test_get_local_version_with_content",
"tests/test_sync_version.py::TestSyncVersion::test_get_remote_version",
"tests/test_sync_version.py::TestSyncVersion::test_get_remote_version_failed",
"tests/test_sync_version.py::TestSyncVersion::test_show_update_dialog",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_outdated_version",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_same_version",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_without_local_version",
"tests/test_sync_version.py::TestSyncVersion::test_upgrade_without_remote_version"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_media",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-05-10 20:39:47+00:00
|
mit
| 3,873
|
|
jaraco__inflect-125
|
diff --git a/CHANGES.rst b/CHANGES.rst
index 5902ef4..a1c8450 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,3 +1,8 @@
+v5.3.0
+======
+
+* #108: Add support for pluralizing open compound nouns.
+
v5.2.0
======
diff --git a/inflect.py b/inflect.py
index 23fece2..3bb946b 100644
--- a/inflect.py
+++ b/inflect.py
@@ -2286,8 +2286,19 @@ class engine:
def postprocess(self, orig: str, inflected) -> str:
inflected = str(inflected)
if "|" in inflected:
- inflected = inflected.split("|")[self.classical_dict["all"]]
- result = inflected.split(" ")
+ word_options = inflected.split("|")
+ # When two parts of a noun need to be pluralized
+ if len(word_options[0].split(" ")) == len(word_options[1].split(" ")):
+ result = inflected.split("|")[self.classical_dict["all"]].split(" ")
+ # When only the last part of the noun needs to be pluralized
+ else:
+ result = inflected.split(" ")
+ for index, word in enumerate(result):
+ if "|" in word:
+ result[index] = word.split("|")[self.classical_dict["all"]]
+ else:
+ result = inflected.split(" ")
+
# Try to fix word wise capitalization
for index, word in enumerate(orig.split(" ")):
if word == "I":
|
jaraco/inflect
|
532444a0dd9db7ee7c40ebe384f8412b28b835fd
|
diff --git a/tests/test_compounds.py b/tests/test_compounds.py
index 52030e8..d968f97 100644
--- a/tests/test_compounds.py
+++ b/tests/test_compounds.py
@@ -61,3 +61,30 @@ def test_unit_handling_combined():
}
for singular, plural in test_cases.items():
assert p.plural(singular) == plural
+
+
+def test_unit_open_compound_nouns():
+ test_cases = {
+ "high school": "high schools",
+ "master genie": "master genies",
+ "MASTER genie": "MASTER genies",
+ "Blood brother": "Blood brothers",
+ "prima donna": "prima donnas",
+ "prima DONNA": "prima DONNAS",
+ }
+ for singular, plural in test_cases.items():
+ assert p.plural(singular) == plural
+
+
+def test_unit_open_compound_nouns_classical():
+ p.classical(all=True)
+ test_cases = {
+ "master genie": "master genii",
+ "MASTER genie": "MASTER genii",
+ "Blood brother": "Blood brethren",
+ "prima donna": "prime donne",
+ "prima DONNA": "prime DONNE",
+ }
+ for singular, plural in test_cases.items():
+ assert p.plural(singular) == plural
+ p.classical(all=False)
|
list index out of range in engine.postprocess
Inflect was installed with pip into a virtual environment running Python 3.7 on Ubuntu 18.04.
After getting an error I wrote some debug statements into inflect.py starting on line 2215:
def postprocess(self, orig, inflected):
    inflected = str(inflected)
    print("Inflected: " + inflected)
    if "|" in inflected:
        inflected = inflected.split("|")[self.classical_dict["all"]]
    result = inflected.split(" ")
    print("Orig: " + orig)
    print("Inflected: " + inflected)
    # Try to fix word wise capitalization
    for index, word in enumerate(orig.split(" ")):
        ...
My code:
>>> import inflect
>>> p = inflect.engine()
>>> p.classical(all=True)
>>> p.plural("Genie")
Inflected: genies|genii
Orig: Genie
Inflected: genii
'Genii'
>>> p.plural("Master Genie")
Inflected: Master genies|genii
Orig: Master Genie
Inflected: genii
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/omitted/for/privacy/lib/python3.7/site-packages/inflect.py", line 2260, in plural
or self._plnoun(word, count),
File "/omitted/for/privacy/lib/python3.7/site-packages/inflect.py", line 2230, in postprocess
result[index] = result[index].capitalize()
IndexError: list index out of range
So it looks like something went wrong in this line:
inflected = inflected.split("|")[self.classical_dict["all"]]
Instead of returning "master genii" it returns just "genii"
**Ninja Edit**: This would be occurring on line 2226 in postprocess without the debug statements.
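A standalone reproduction of the failure mode (not the library code itself) shows how selecting the classical variant of the whole pipe-separated string drops the leading word:
```
# Standalone illustration of the failure mode described above.
orig = "Master Genie"
inflected = "Master genies|genii"

# Selecting the classical variant of the whole pipe-separated string
# drops the leading word entirely.
chosen = inflected.split("|")[1]   # "genii" instead of "Master genii"
result = chosen.split(" ")

print(len(orig.split(" ")))  # 2 words in the original ...
print(len(result))           # ... but only 1 word left, so indexing
                             # result[1] in the capitalization loop raises IndexError
```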
|
0.0
|
532444a0dd9db7ee7c40ebe384f8412b28b835fd
|
[
"tests/test_compounds.py::test_unit_open_compound_nouns_classical"
] |
[
"tests/test_compounds.py::test_compound_1",
"tests/test_compounds.py::test_unit_handling_fractional",
"tests/test_compounds.py::test_compound_3",
"tests/test_compounds.py::test_unit_handling_combined",
"tests/test_compounds.py::test_unit_handling_degree",
"tests/test_compounds.py::test_unit_open_compound_nouns",
"tests/test_compounds.py::test_compound_4",
"tests/test_compounds.py::test_compound_2",
"tests/test_compounds.py::BLACK",
"tests/test_compounds.py::mypy"
] |
{
"failed_lite_validators": [
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-02-28 22:14:55+00:00
|
mit
| 3,217
|
|
torrua__keyboa-15
|
diff --git a/keyboa/button.py b/keyboa/button.py
index 6280ba8..9b70703 100644
--- a/keyboa/button.py
+++ b/keyboa/button.py
@@ -46,6 +46,9 @@ class Button(ButtonCheck):
back_marker: CallbackDataMarker = str()
copy_text_to_callback: Optional[bool] = None
+ def __call__(self, *args, **kwargs):
+ return self.generate()
+
def generate(self) -> InlineKeyboardButton:
"""
This function creates an InlineKeyboardButton object from various data types,
|
torrua/keyboa
|
f70ec7162e4352726922d60088f2bce9e88fc96f
|
diff --git a/tests/test_base.py b/tests/test_base.py
index 78dc2cb..1c77e28 100644
--- a/tests/test_base.py
+++ b/tests/test_base.py
@@ -17,7 +17,7 @@ def test_items_is_none_or_empty():
:return:
"""
with pytest.raises(ValueError) as _:
- Keyboa(items=list())
+ Keyboa(items=[])
with pytest.raises(ValueError) as _:
Keyboa(items=None)
diff --git a/tests/test_button.py b/tests/test_button.py
index b0a8ad1..435183a 100644
--- a/tests/test_button.py
+++ b/tests/test_button.py
@@ -37,7 +37,7 @@ UNACCEPTABLE_BUTTON_SOURCE_TYPES = (
{2, "a"},
{"a", 2},
[2, "a"],
- (2, dict()),
+ (2, {}),
["a", 2],
(None, 2),
(None, None),
@@ -284,3 +284,8 @@ def test_none_as_markers():
def test_button_property():
btn = Button(button_data="button_text", copy_text_to_callback=True).button
assert isinstance(btn, InlineKeyboardButton)
+
+
+def test_button_call_method():
+ btn = Button(button_data="button_text", copy_text_to_callback=True)
+ assert isinstance(btn(), InlineKeyboardButton)
diff --git a/tests/test_keyboard.py b/tests/test_keyboard.py
index 41627fb..0709297 100644
--- a/tests/test_keyboard.py
+++ b/tests/test_keyboard.py
@@ -667,3 +667,22 @@ def test_kb_with_items_in_row_and_last_buttons():
items_in_row=2,
).keyboard
assert len(keyboa.keyboard) == 4
+
+
+def test_kb_is_callable():
+ keyboa = Keyboa(
+ items=[
+ (1, "a"),
+ (2, "b"),
+ (3, "c"),
+ (4, "d"),
+ (5, "e"),
+ (6, "f"),
+ ],
+ back_marker="_is_callable",
+ items_in_row=2,
+ )
+ assert type(keyboa.keyboard) == type(keyboa())
+ assert keyboa.keyboard.to_json() == keyboa().to_json() == keyboa.slice().to_json()
+ assert keyboa.slice(slice(3)).to_json() == keyboa(slice(3)).to_json()
+ assert keyboa.slice(slice(2, 4, 2)).to_json() == keyboa(slice(2, 4, 2)).to_json()
|
(PTC-W0019) Consider using literal syntax to create the data structure
## Description
Using the literal syntax can give minor performance bumps compared to using function calls to create `dict`, `list` and `tuple`.
## Occurrences
There are 2 occurrences of this issue in the repository.
See all occurrences on DeepSource → [deepsource.io/gh/torrua/keyboa/issue/PTC-W0019/occurrences/](https://deepsource.io/gh/torrua/keyboa/issue/PTC-W0019/occurrences/)
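For illustration, these are the two spellings the check distinguishes (behaviour is identical; the literal form simply avoids a global name lookup and a call):
```
# The two spellings flagged by this check.
empty_mapping = dict()   # flagged: function call
empty_mapping = {}       # preferred: literal

empty_sequence = list()  # flagged: function call
empty_sequence = []      # preferred: literal
```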
|
0.0
|
f70ec7162e4352726922d60088f2bce9e88fc96f
|
[
"tests/test_button.py::test_button_call_method"
] |
[
"tests/test_base.py::test_items_is_none_or_empty",
"tests/test_base.py::test_copy_text_to_callback_is_not_bool",
"tests/test_base.py::test_number_of_items_out_of_limits",
"tests/test_base.py::test_number_of_items_in_row_out_of_limits",
"tests/test_button.py::test_acceptable_button_source_types[2_0]",
"tests/test_button.py::test_acceptable_button_source_types[a]",
"tests/test_button.py::test_acceptable_button_source_types[2_1]",
"tests/test_button.py::test_acceptable_button_source_types[button_data3]",
"tests/test_button.py::test_acceptable_button_source_types[button_data4]",
"tests/test_button.py::test_acceptable_button_source_types[button_data5]",
"tests/test_button.py::test_acceptable_button_source_types[button_data6]",
"tests/test_button.py::test_acceptable_button_source_types[button_data7]",
"tests/test_button.py::test_unacceptable_button_source_types_without_callback[2_0]",
"tests/test_button.py::test_unacceptable_button_source_types_without_callback[a]",
"tests/test_button.py::test_unacceptable_button_source_types_without_callback[2_1]",
"tests/test_button.py::test_unacceptable_button_source_types_without_callback[button_data3]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data0]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data1]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data2]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data3]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data4]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data5]",
"tests/test_button.py::test_unacceptable_button_source_types[button_data6]",
"tests/test_button.py::test_unacceptable_button_source_types[None]",
"tests/test_button.py::test_unacceptable_front_marker_type",
"tests/test_button.py::test_unacceptable_back_marker_type",
"tests/test_button.py::test_unacceptable_callback_data_type",
"tests/test_button.py::test_unacceptable_text_type[button_data0]",
"tests/test_button.py::test_unacceptable_text_type[button_data1]",
"tests/test_button.py::test_unacceptable_text_type[button_data2]",
"tests/test_button.py::test_create_button_from_dict_tuple_list[button_data0]",
"tests/test_button.py::test_create_button_from_dict_tuple_list[button_data1]",
"tests/test_button.py::test_create_button_from_int_or_str_with_copy_option[12345_0]",
"tests/test_button.py::test_create_button_from_int_or_str_with_copy_option[12345_1]",
"tests/test_button.py::test_create_button_from_int_or_str_without_copy_option[12345_0]",
"tests/test_button.py::test_create_button_from_int_or_str_without_copy_option[12345_1]",
"tests/test_button.py::test_create_button_from_int_or_str_without_callback[12345_0]",
"tests/test_button.py::test_create_button_from_int_or_str_without_callback[12345_1]",
"tests/test_button.py::test_create_button_from_button",
"tests/test_button.py::test_empty_text",
"tests/test_button.py::test_empty_callback_data",
"tests/test_button.py::test_big_callback_data",
"tests/test_button.py::test_none_as_markers",
"tests/test_button.py::test_button_property",
"tests/test_keyboard.py::test_keyboards_is_none",
"tests/test_keyboard.py::test_keyboards_is_single_keyboard",
"tests/test_keyboard.py::test_keyboards_is_multi_keyboards",
"tests/test_keyboard.py::test_not_keyboard_for_merge",
"tests/test_keyboard.py::test_merge_two_keyboard_into_one_out_of_limits",
"tests/test_keyboard.py::test_pass_string_with_copy_to_callback",
"tests/test_keyboard.py::test_pass_string_without_copy_to_callback",
"tests/test_keyboard.py::test_pass_one_button",
"tests/test_keyboard.py::test_pass_one_item_dict_with_text_field",
"tests/test_keyboard.py::test_pass_one_item_dict_without_text_field",
"tests/test_keyboard.py::test_pass_multi_item_dict_without_text_field",
"tests/test_keyboard.py::test_pass_one_row",
"tests/test_keyboard.py::test_pass_structure",
"tests/test_keyboard.py::test_auto_keyboa_maker_alignment",
"tests/test_keyboard.py::test_auto_keyboa_maker_items_in_row",
"tests/test_keyboard.py::test_slice",
"tests/test_keyboard.py::test_minimal_kb_with_copy_text_to_callback_specified_none",
"tests/test_keyboard.py::test_minimal_kb_with_items_out_of_limits",
"tests/test_keyboard.py::test_minimal_kb_with_copy_text_to_callback_specified_true",
"tests/test_keyboard.py::test_minimal_kb_with_copy_text_to_callback_specified_false",
"tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[2]",
"tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[3]",
"tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[4]",
"tests/test_keyboard.py::test_minimal_kb_with_fixed_items_in_row[6]",
"tests/test_keyboard.py::test_minimal_kb_with_front_marker",
"tests/test_keyboard.py::test_minimal_kb_with_front_marker_and_copy_text_to_callback",
"tests/test_keyboard.py::test_minimal_kb_with_back_marker",
"tests/test_keyboard.py::test_minimal_kb_with_back_marker_out_of_limits",
"tests/test_keyboard.py::test_minimal_kb_with_back_marker_out_of_limits_with_text",
"tests/test_keyboard.py::test_minimal_kb_with_empty_back_marker",
"tests/test_keyboard.py::test_minimal_kb_with_back_marker_and_copy_text_to_callback",
"tests/test_keyboard.py::test_minimal_kb_with_front_and_back_markers",
"tests/test_keyboard.py::test_minimal_kb_with_front_and_back_markers_and_copy_text_to_callback",
"tests/test_keyboard.py::test_minimal_kb_with_front_and_back_markers_and_copy_text_to_callback_is_false",
"tests/test_keyboard.py::test_minimal_kb_with_alignment_true",
"tests/test_keyboard.py::test_minimal_kb_with_items_in_row",
"tests/test_keyboard.py::test_minimal_kb_with_items_in_row_out_of_limits",
"tests/test_keyboard.py::test_minimal_kb_with_alignment_true_slice",
"tests/test_keyboard.py::test_minimal_kb_with_alignment_true_and_reversed_alignment_true",
"tests/test_keyboard.py::test_minimal_kb_with_alignment_specified",
"tests/test_keyboard.py::test_minimal_kb_with_alignment_specified_out_of_limits",
"tests/test_keyboard.py::test_minimal_kb_with_alignment_specified_and_reversed_alignment_true",
"tests/test_keyboard.py::test_minimal_kb_with_reversed_alignment_true",
"tests/test_keyboard.py::test_minimal_kb_with_all_parameters_specified_reversed_range_true",
"tests/test_keyboard.py::test_minimal_kb_with_all_parameters_specified_reversed_range_false",
"tests/test_keyboard.py::test_structured_kb",
"tests/test_keyboard.py::test_structured_kb_with_alignment",
"tests/test_keyboard.py::test_structured_kb_with_items_in_row",
"tests/test_keyboard.py::test_structured_kb_with_front_marker",
"tests/test_keyboard.py::test_structured_kb_with_front_marker_no_copy_text_to_callback",
"tests/test_keyboard.py::test_kb_from_tuples",
"tests/test_keyboard.py::test_kb_from_tuples_with_front_marker",
"tests/test_keyboard.py::test_kb_from_tuples_with_back_marker_and_items_in_row",
"tests/test_keyboard.py::test_kb_with_items_in_row_and_last_buttons",
"tests/test_keyboard.py::test_kb_is_callable"
] |
{
"failed_lite_validators": [
"has_hyperlinks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-10-01 04:11:39+00:00
|
mit
| 6,064
|
|
unt-libraries__pyuntl-43
|
diff --git a/pyuntl/untl_structure.py b/pyuntl/untl_structure.py
index b47c3fc..25b2487 100644
--- a/pyuntl/untl_structure.py
+++ b/pyuntl/untl_structure.py
@@ -1,5 +1,6 @@
import socket
import json
+import sys
import urllib.request
from lxml.etree import Element, SubElement, tostring
from pyuntl import UNTL_XML_ORDER, VOCABULARIES_URL
@@ -457,6 +458,42 @@ class Metadata(UNTLElement):
# Create the form object.
return FormGenerator(**kwargs)
+ def make_hidden(self):
+ """Make an unhidden record hidden."""
+ for element in self.children:
+ if element.tag == 'meta' and element.qualifier == 'hidden':
+ # Make the element hidden.
+ if element.content == 'False':
+ element.content = 'True'
+ return None
+ # Create a hidden meta element if it doesn't exist.
+ hidden_element = PYUNTL_DISPATCH['meta'](qualifier='hidden', content='True')
+ self.children.append(hidden_element)
+
+ def make_unhidden(self):
+ """Make a hidden record unhidden."""
+ for element in self.children:
+ if element.tag == 'meta' and element.qualifier == 'hidden':
+ # Make the element unhidden.
+ if element.content == 'True':
+ element.content = 'False'
+ return None
+ # Create a hidden meta element if it doesn't exist.
+ hidden_element = PYUNTL_DISPATCH['meta'](qualifier='hidden', content='False')
+ self.children.append(hidden_element)
+
+ @property
+ def is_hidden(self):
+ """Return True if a UNTL element is hidden."""
+ for element in self.children:
+ if element.tag == 'meta' and element.qualifier == 'hidden':
+ if element.content == 'True':
+ return True
+ else:
+ return False
+ sys.stderr.write('A hidden meta element does not exist.')
+ return False
+
class Title(UNTLElement):
def __init__(self, **kwargs):
|
unt-libraries/pyuntl
|
a02066a0d2607c16de1591c23b71f4d36abc7591
|
diff --git a/tests/untl_structure_test.py b/tests/untl_structure_test.py
index 3e82016..8f08cb6 100644
--- a/tests/untl_structure_test.py
+++ b/tests/untl_structure_test.py
@@ -587,3 +587,67 @@ def test_generate_form_data(_):
assert isinstance(fg, us.FormGenerator)
# Check missing children were added.
assert len(metadata.children) == len(metadata.contained_children)
+
+
[email protected]('test_input_content, test_output',
+ [
+ ([us.Meta(content='DC', qualifier='system'),
+ us.Meta(content='True', qualifier='hidden')],
+ True),
+ ([us.Meta(content='DC', qualifier='system'),
+ us.Meta(content='False', qualifier='hidden')],
+ False),
+ ])
+def test_Metadata_is_hidden(test_input_content, test_output):
+ """Check if a UNTL element is hidden."""
+ metadata = us.Metadata()
+ metadata.children = test_input_content
+ assert metadata.is_hidden is test_output
+
+
+def test_Metadata_is_hidden_with_no_meta_hidden_element(capsys):
+ metadata = us.Metadata()
+ metadata.children = [us.Meta(content='DC', qualifier='system')]
+ assert metadata.is_hidden is False
+ captured = capsys.readouterr()
+ assert captured.err == 'A hidden meta element does not exist.'
+
+
[email protected]('test_input_elements, test_input_content',
+ [
+ ([us.Meta(content='DC', qualifier='system'),
+ us.Meta(content='True', qualifier='hidden')],
+ True),
+ ([us.Meta(content='DC', qualifier='system'),
+ us.Meta(content='False', qualifier='hidden')],
+ False),
+ ([us.Meta(content='DC', qualifier='system')],
+ False)
+ ])
+def test_Metadata_make_hidden(test_input_elements, test_input_content):
+ """Test if a UNTL unhidden element is altered to hidden."""
+ metadata = us.Metadata()
+ metadata.children = test_input_elements
+ assert metadata.is_hidden is test_input_content
+ metadata.make_hidden()
+ assert metadata.is_hidden is True
+
+
[email protected]('test_input_elements, test_input_content',
+ [
+ ([us.Meta(content='DC', qualifier='system'),
+ us.Meta(content='True', qualifier='hidden')],
+ True),
+ ([us.Meta(content='DC', qualifier='system'),
+ us.Meta(content='False', qualifier='hidden')],
+ False),
+ ([us.Meta(content='DC', qualifier='system')],
+ False)
+ ])
+def test_Metadata_make_unhidden(test_input_elements, test_input_content):
+ """Test if a UNTL hidden element is altered to unhidden."""
+ metadata = us.Metadata()
+ metadata.children = test_input_elements
+ assert metadata.is_hidden is test_input_content
+ metadata.make_unhidden()
+ assert metadata.is_hidden is False
|
Add hidden/unhidden helpers to pyuntl
I was thinking that it might be useful to add a few common methods and properties to the Python version of a UNTL record.
I was thinking we could add two properties,
`record.is_hidden` and `record.is_unhidden`, which would return `True` or `False` depending on the record.
We could also add two methods, `record.make_hidden()` and `record.make_unhidden()`, that could be used to adjust these values.
I'm open to different language, formatting, and whatnot on the methods or property names.
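For illustration, usage of the proposed helpers might look roughly like this (hypothetical sketch; the final names and behaviour could differ):
```
# Hypothetical usage of the proposed helpers; names and behaviour may differ.
from pyuntl.untl_structure import Metadata

record = Metadata()
record.make_hidden()
print(record.is_hidden)    # True

record.make_unhidden()
print(record.is_hidden)    # False
```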
|
0.0
|
a02066a0d2607c16de1591c23b71f4d36abc7591
|
[
"tests/untl_structure_test.py::test_Metadata_is_hidden[test_input_content0-True]",
"tests/untl_structure_test.py::test_Metadata_is_hidden[test_input_content1-False]",
"tests/untl_structure_test.py::test_Metadata_is_hidden_with_no_meta_hidden_element",
"tests/untl_structure_test.py::test_Metadata_make_hidden[test_input_elements0-True]",
"tests/untl_structure_test.py::test_Metadata_make_hidden[test_input_elements1-False]",
"tests/untl_structure_test.py::test_Metadata_make_hidden[test_input_elements2-False]",
"tests/untl_structure_test.py::test_Metadata_make_unhidden[test_input_elements0-True]",
"tests/untl_structure_test.py::test_Metadata_make_unhidden[test_input_elements1-False]",
"tests/untl_structure_test.py::test_Metadata_make_unhidden[test_input_elements2-False]"
] |
[
"tests/untl_structure_test.py::test_UNTLStructureException",
"tests/untl_structure_test.py::test_create_untl_xml_subelement_no_children",
"tests/untl_structure_test.py::test_create_untl_xml_subelement_children",
"tests/untl_structure_test.py::test_add_missing_children",
"tests/untl_structure_test.py::test_UNTLElement_init",
"tests/untl_structure_test.py::test_UNTLElement_set_qualifier",
"tests/untl_structure_test.py::test_UNTLElement_set_qualifier_exception",
"tests/untl_structure_test.py::test_UNTLElement_add_child",
"tests/untl_structure_test.py::test_UNTLElement_add_child_exception",
"tests/untl_structure_test.py::test_UNTLElement_set_content",
"tests/untl_structure_test.py::test_UNTLElement_set_content_exception",
"tests/untl_structure_test.py::test_UNTLElement_add_form_qualifier_and_content",
"tests/untl_structure_test.py::test_UNTLElement_add_form_qualifier_and_content_mocked",
"tests/untl_structure_test.py::test_UNTLElement_add_form_qualifier_only",
"tests/untl_structure_test.py::test_UNTLElement_add_form_qualifier_only_mocked",
"tests/untl_structure_test.py::test_UNTLElement_add_form_content_only_no_parent_tag",
"tests/untl_structure_test.py::test_UNTLElement_add_form_content_only_no_parent_tag_mocked",
"tests/untl_structure_test.py::test_UNTLElement_add_form_content_and_parent_tag",
"tests/untl_structure_test.py::test_UNTLElement_add_form_content_and_parent_tag_mocked",
"tests/untl_structure_test.py::test_UNTLElement_add_form_no_qualifier_no_content_no_parent_tag",
"tests/untl_structure_test.py::test_UNTLElement_add_form_no_qualifier_no_content_no_parent_tag_mocked",
"tests/untl_structure_test.py::test_UNTLElement_add_form_no_qualifier_no_content_parent_tag",
"tests/untl_structure_test.py::test_UNTLElement_add_form_no_qualifier_no_content_parent_tag_mocked",
"tests/untl_structure_test.py::test_UNTLElement_completeness",
"tests/untl_structure_test.py::test_UNTLElement_record_length",
"tests/untl_structure_test.py::test_UNTLElement_record_content_length",
"tests/untl_structure_test.py::test_FormGenerator",
"tests/untl_structure_test.py::test_FormGenerator_hidden_is_alone",
"tests/untl_structure_test.py::test_FormGenerator_adjustable_items",
"tests/untl_structure_test.py::test_FormGenerator_get_vocabularies",
"tests/untl_structure_test.py::test_FormGenerator_fails_without_vocab_service",
"tests/untl_structure_test.py::test_Metadata_create_xml_string",
"tests/untl_structure_test.py::test_Metadata_create_xml",
"tests/untl_structure_test.py::test_Metadata_create_xml_use_namespace",
"tests/untl_structure_test.py::test_Metadata_create_element_dict",
"tests/untl_structure_test.py::test_Metadata_create_xml_file",
"tests/untl_structure_test.py::test_Metadata_create_xml_file_ascii_hex",
"tests/untl_structure_test.py::test_Metadata_create_xml_file_exception_raised",
"tests/untl_structure_test.py::test_Metadata_sort_untl",
"tests/untl_structure_test.py::test_Metadata_validate",
"tests/untl_structure_test.py::test_generate_form_data"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2020-04-13 23:12:10+00:00
|
bsd-3-clause
| 6,176
|
|
oasis-open__cti-taxii-server-44
|
diff --git a/medallion/backends/memory_backend.py b/medallion/backends/memory_backend.py
index 8da67b0..b07ae14 100644
--- a/medallion/backends/memory_backend.py
+++ b/medallion/backends/memory_backend.py
@@ -163,22 +163,23 @@ class MemoryBackend(Backend):
try:
for new_obj in objs["objects"]:
id_and_version_already_present = False
- if new_obj["id"] in collection["objects"]:
- current_obj = collection["objects"][new_obj["id"]]
- if "modified" in new_obj:
- if new_obj["modified"] == current_obj["modified"]:
+ for obj in collection["objects"]:
+ id_and_version_already_present = False
+
+ if new_obj['id'] == obj['id']:
+ if "modified" in new_obj:
+ if new_obj["modified"] == obj["modified"]:
+ id_and_version_already_present = True
+ else:
+ # There is no modified field, so this object is immutable
id_and_version_already_present = True
- else:
- # There is no modified field, so this object is immutable
- id_and_version_already_present = True
if not id_and_version_already_present:
collection["objects"].append(new_obj)
self._update_manifest(new_obj, api_root, collection["id"])
successes.append(new_obj["id"])
succeeded += 1
else:
- failures.append({"id": new_obj["id"],
- "message": "Unable to process object"})
+ failures.append({"id": new_obj["id"], "message": "Unable to process object"})
failed += 1
except Exception as e:
raise ProcessingError("While processing supplied content, an error occured", e)
|
oasis-open/cti-taxii-server
|
c99f8dcd93ad5f06174fc2767771acd491bedaaa
|
diff --git a/medallion/test/test_memory_backend.py b/medallion/test/test_memory_backend.py
index a23b9f1..7c9f982 100644
--- a/medallion/test/test_memory_backend.py
+++ b/medallion/test/test_memory_backend.py
@@ -207,6 +207,54 @@ class TestTAXIIServerWithMemoryBackend(unittest.TestCase):
assert manifests["objects"][0]["id"] == new_id
# ------------- BEGIN: end manifest section ------------- #
+ def test_add_existing_objects(self):
+ new_bundle = copy.deepcopy(API_OBJECTS_2)
+ new_id = "indicator--%s" % uuid.uuid4()
+ new_bundle["objects"][0]["id"] = new_id
+
+ # ------------- BEGIN: add object section ------------- #
+
+ post_header = copy.deepcopy(self.auth)
+ post_header["Content-Type"] = MEDIA_TYPE_STIX_V20
+ post_header["Accept"] = MEDIA_TYPE_TAXII_V20
+
+ r_post = self.client.post(
+ test.ADD_OBJECTS_EP,
+ data=json.dumps(new_bundle),
+ headers=post_header
+ )
+ self.assertEqual(r_post.status_code, 202)
+ self.assertEqual(r_post.content_type, MEDIA_TYPE_TAXII_V20)
+
+ # ------------- END: add object section ------------- #
+ # ------------- BEGIN: add object again section ------------- #
+
+ r_post = self.client.post(
+ test.ADD_OBJECTS_EP,
+ data=json.dumps(new_bundle),
+ headers=post_header
+ )
+ status_response2 = self.load_json_response(r_post.data)
+ self.assertEqual(r_post.status_code, 202)
+ self.assertEqual(status_response2["success_count"], 0)
+ self.assertEqual(status_response2["failures"][0]["message"],
+ "Unable to process object")
+
+ # ------------- END: add object again section ------------- #
+ # ------------- BEGIN: get object section ------------- #
+
+ get_header = copy.deepcopy(self.auth)
+ get_header["Accept"] = MEDIA_TYPE_STIX_V20
+
+ r_get = self.client.get(
+ test.GET_OBJECTS_EP + "?match[id]=%s" % new_id,
+ headers=get_header
+ )
+ self.assertEqual(r_get.status_code, 200)
+ objs = self.load_json_response(r_get.data)
+ self.assertEqual(len(objs["objects"]), 1)
+ self.assertEqual(objs["objects"][0]["id"], new_id)
+
def test_client_object_versioning(self):
new_id = "indicator--%s" % uuid.uuid4()
new_bundle = copy.deepcopy(API_OBJECTS_2)
|
BUG: checking whether new object IDs already exist always fails
https://github.com/oasis-open/cti-taxii-server/blob/f6398926d866989201e754dca0fec1e1a041acdf/medallion/backends/memory_backend.py#L166
This line will always fail: it checks whether the object's ID is a member of the array of objects, instead of checking whether the ID matches the ID of _any_ of the objects.
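A small standalone illustration of the membership confusion (sketch only; the data is made up):
```
# Sketch of the membership check described above.
objects = [{"id": "indicator--1", "modified": "2018-01-01T00:00:00Z"}]
new_obj = {"id": "indicator--1", "modified": "2018-01-01T00:00:00Z"}

# Always False: it asks whether the id string is one of the dicts in the list.
print(new_obj["id"] in objects)

# Presumably intended: does any stored object carry the same id?
print(any(obj["id"] == new_obj["id"] for obj in objects))
```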
|
0.0
|
c99f8dcd93ad5f06174fc2767771acd491bedaaa
|
[
"medallion/test/test_memory_backend.py::TestTAXIIServerWithMemoryBackend::test_add_existing_objects"
] |
[] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2018-10-04 01:26:04+00:00
|
bsd-3-clause
| 4,323
|
|
google__cyanobyte-128
|
diff --git a/cloudbuild-deploy.yaml b/cloudbuild-deploy.yaml
index 83f0570..dbb30dd 100644
--- a/cloudbuild-deploy.yaml
+++ b/cloudbuild-deploy.yaml
@@ -16,6 +16,28 @@ steps:
args: ['-c', 'wget -O- https://github.com/gohugoio/hugo/releases/download/v${_HUGO}/hugo_extended_${_HUGO}_Linux-64bit.tar.gz | tar zx']
id: Install Hugo manually
+- name: docker.io/library/python:3.7
+ entrypoint: python3
+ id: Generate Hugo-compatible Markdown for our peripherals
+ args:
+ - 'src/codegen.py'
+ - '-t'
+ - 'templates/doc.md'
+ - '-o'
+ - './docs/content/docs/Reference/Peripheral Docs'
+ - '-i'
+ - 'peripherals/ADS1015.yaml'
+ - '-i'
+ - 'peripherals/BMP280.yaml'
+ - '-i'
+ - 'peripherals/LSM303D.yaml'
+ - '-i'
+ - 'peripherals/MCP4725.yaml'
+ - '-i'
+ - 'peripherals/MCP9808.yaml'
+ - '-i'
+ - 'peripherals/TCS3472.yaml'
+
- name: gcr.io/cloud-builders/git
entrypoint: bash
id: Move up content directory
diff --git a/docs/content/docs/Reference/Peripheral Docs/_index.md b/docs/content/docs/Reference/Peripheral Docs/_index.md
new file mode 100644
index 0000000..b1d2fe4
--- /dev/null
+++ b/docs/content/docs/Reference/Peripheral Docs/_index.md
@@ -0,0 +1,7 @@
+---
+title: "Auto-generated reference docs"
+linkTitle: "Auto-generated reference docs"
+weight: 1
+description: >
+ Hosted peripherals
+---
diff --git a/templates/doc.md b/templates/doc.md
index b27738e..4c880b6 100644
--- a/templates/doc.md
+++ b/templates/doc.md
@@ -1,5 +1,10 @@
-# {{ info.title }}
-{{ info.description }}
+---
+title: "{{info.title}}"
+linkTitle: "{{info.title}}"
+weight: 4
+description: >
+ {{info.description}}
+---
## Registers
{% for register in registers %}
@@ -17,4 +22,4 @@ _{{ info.title }} version {{ info.version }}_
_Based on Cyanobyte spec version {{cyanobyte}}_
-_Generated from Cyanobyte Codegen version {{ version }}_
\ No newline at end of file
+_Generated from Cyanobyte Codegen version {{ version }}_
|
google/cyanobyte
|
429e255bc41341c9993ca4d9bd7d46a283f05202
|
diff --git a/test/sampleData/markdown/ADS1015.md b/test/sampleData/markdown/ADS1015.md
index b188fd7..eb84995 100644
--- a/test/sampleData/markdown/ADS1015.md
+++ b/test/sampleData/markdown/ADS1015.md
@@ -1,5 +1,10 @@
-# ADS1015
-Texas Instruments Analog-Digital Converter
+---
+title: "ADS1015"
+linkTitle: "ADS1015"
+weight: 4
+description: >
+ Texas Instruments Analog-Digital Converter
+---
## Registers
diff --git a/test/sampleData/markdown/BMP280.md b/test/sampleData/markdown/BMP280.md
index 000fa87..85b7389 100644
--- a/test/sampleData/markdown/BMP280.md
+++ b/test/sampleData/markdown/BMP280.md
@@ -1,5 +1,10 @@
-# BMP280
-Bosch Digital Pressure Sensor
+---
+title: "BMP280"
+linkTitle: "BMP280"
+weight: 4
+description: >
+ Bosch Digital Pressure Sensor
+---
## Registers
diff --git a/test/sampleData/markdown/LSM303D.md b/test/sampleData/markdown/LSM303D.md
index 8f7b201..67711b9 100644
--- a/test/sampleData/markdown/LSM303D.md
+++ b/test/sampleData/markdown/LSM303D.md
@@ -1,5 +1,10 @@
-# LSM303D
-STMicroelectronics accelerometer and magnetometer
+---
+title: "LSM303D"
+linkTitle: "LSM303D"
+weight: 4
+description: >
+ STMicroelectronics accelerometer and magnetometer
+---
## Registers
diff --git a/test/sampleData/markdown/MCP4725.md b/test/sampleData/markdown/MCP4725.md
index d4bbc26..2c9f700 100644
--- a/test/sampleData/markdown/MCP4725.md
+++ b/test/sampleData/markdown/MCP4725.md
@@ -1,5 +1,10 @@
-# MCP4725
-Microchip 4725 Digital-to-Analog Converter
+---
+title: "MCP4725"
+linkTitle: "MCP4725"
+weight: 4
+description: >
+ Microchip 4725 Digital-to-Analog Converter
+---
## Registers
diff --git a/test/sampleData/markdown/MCP9808.md b/test/sampleData/markdown/MCP9808.md
index f923450..6d879e1 100644
--- a/test/sampleData/markdown/MCP9808.md
+++ b/test/sampleData/markdown/MCP9808.md
@@ -1,5 +1,10 @@
-# MCP9808
-This is a test description
+---
+title: "MCP9808"
+linkTitle: "MCP9808"
+weight: 4
+description: >
+ This is a test description
+---
## Registers
diff --git a/test/sampleData/markdown/TCS3472.md b/test/sampleData/markdown/TCS3472.md
index 26907e5..1d80532 100644
--- a/test/sampleData/markdown/TCS3472.md
+++ b/test/sampleData/markdown/TCS3472.md
@@ -1,5 +1,10 @@
-# TCS3472
-Color Light-to-Digital Converter with IR Filter
+---
+title: "TCS3472"
+linkTitle: "TCS3472"
+weight: 4
+description: >
+ Color Light-to-Digital Converter with IR Filter
+---
## Registers
|
Add doc generation from markdown template
|
0.0
|
429e255bc41341c9993ca4d9bd7d46a283f05202
|
[
"test/test_codegen.py::TestCodegen::test_Markdown"
] |
[
"test/test_codegen.py::TestCodegen::test_Arduino",
"test/test_codegen.py::TestCodegen::test_Kubos",
"test/test_codegen.py::TestCodegen::test_RaspberryPi"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_added_files",
"has_many_modified_files"
],
"has_test_patch": true,
"is_lite": false
}
|
2020-03-04 21:42:32+00:00
|
apache-2.0
| 2,565
|
|
python__mypy_extensions-47
|
diff --git a/mypy_extensions.py b/mypy_extensions.py
index 6600b21..aff5145 100644
--- a/mypy_extensions.py
+++ b/mypy_extensions.py
@@ -42,17 +42,32 @@ def _typeddict_new(cls, _typename, _fields=None, **kwargs):
except (AttributeError, ValueError):
pass
- return _TypedDictMeta(_typename, (), ns)
+ return _TypedDictMeta(_typename, (), ns, _from_functional_call=True)
class _TypedDictMeta(type):
- def __new__(cls, name, bases, ns, total=True):
+ def __new__(cls, name, bases, ns, total=True, _from_functional_call=False):
# Create new typed dict class object.
# This method is called directly when TypedDict is subclassed,
# or via _typeddict_new when TypedDict is instantiated. This way
# TypedDict supports all three syntaxes described in its docstring.
# Subclasses and instances of TypedDict return actual dictionaries
# via _dict_new.
+
+ # We need the `if TypedDict in globals()` check,
+ # or we emit a DeprecationWarning when creating mypy_extensions.TypedDict itself
+ if 'TypedDict' in globals():
+ import warnings
+ warnings.warn(
+ (
+ "mypy_extensions.TypedDict is deprecated, "
+ "and will be removed in a future version. "
+ "Use typing.TypedDict or typing_extensions.TypedDict instead."
+ ),
+ DeprecationWarning,
+ stacklevel=(3 if _from_functional_call else 2)
+ )
+
ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns)
diff --git a/tox.ini b/tox.ini
index 9f64a32..b0766b5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,7 +5,7 @@ envlist = py35, py36, py37, py38, py39, py310, py311
[testenv]
description = run the test driver with {basepython}
-commands = python -m unittest discover tests
+commands = python -We -m unittest discover tests
[testenv:lint]
description = check the code style
|
python/mypy_extensions
|
a1b59f718e28c5dc897272211e0d00e0f58cc908
|
diff --git a/tests/testextensions.py b/tests/testextensions.py
index 991c4e5..a41f5f6 100644
--- a/tests/testextensions.py
+++ b/tests/testextensions.py
@@ -1,6 +1,8 @@
import sys
import pickle
import typing
+from contextlib import contextmanager
+from textwrap import dedent
from unittest import TestCase, main, skipUnless
from mypy_extensions import TypedDict, i64, i32, i16, u8
@@ -25,17 +27,22 @@ class BaseTestCase(TestCase):
PY36 = sys.version_info[:2] >= (3, 6)
PY36_TESTS = """
-Label = TypedDict('Label', [('label', str)])
+import warnings
-class Point2D(TypedDict):
- x: int
- y: int
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore", category=DeprecationWarning)
-class LabelPoint2D(Point2D, Label): ...
+ Label = TypedDict('Label', [('label', str)])
-class Options(TypedDict, total=False):
- log_level: int
- log_path: str
+ class Point2D(TypedDict):
+ x: int
+ y: int
+
+ class LabelPoint2D(Point2D, Label): ...
+
+ class Options(TypedDict, total=False):
+ log_level: int
+ log_path: str
"""
if PY36:
@@ -43,9 +50,16 @@ if PY36:
class TypedDictTests(BaseTestCase):
+ @contextmanager
+ def assert_typeddict_deprecated(self):
+ with self.assertWarnsRegex(
+ DeprecationWarning, "mypy_extensions.TypedDict is deprecated"
+ ):
+ yield
def test_basics_iterable_syntax(self):
- Emp = TypedDict('Emp', {'name': str, 'id': int})
+ with self.assert_typeddict_deprecated():
+ Emp = TypedDict('Emp', {'name': str, 'id': int})
self.assertIsSubclass(Emp, dict)
self.assertIsSubclass(Emp, typing.MutableMapping)
if sys.version_info[0] >= 3:
@@ -62,7 +76,8 @@ class TypedDictTests(BaseTestCase):
self.assertEqual(Emp.__total__, True)
def test_basics_keywords_syntax(self):
- Emp = TypedDict('Emp', name=str, id=int)
+ with self.assert_typeddict_deprecated():
+ Emp = TypedDict('Emp', name=str, id=int)
self.assertIsSubclass(Emp, dict)
self.assertIsSubclass(Emp, typing.MutableMapping)
if sys.version_info[0] >= 3:
@@ -79,7 +94,8 @@ class TypedDictTests(BaseTestCase):
self.assertEqual(Emp.__total__, True)
def test_typeddict_errors(self):
- Emp = TypedDict('Emp', {'name': str, 'id': int})
+ with self.assert_typeddict_deprecated():
+ Emp = TypedDict('Emp', {'name': str, 'id': int})
self.assertEqual(TypedDict.__module__, 'mypy_extensions')
jim = Emp(name='Jim', id=1)
with self.assertRaises(TypeError):
@@ -88,9 +104,9 @@ class TypedDictTests(BaseTestCase):
isinstance(jim, Emp) # type: ignore
with self.assertRaises(TypeError):
issubclass(dict, Emp) # type: ignore
- with self.assertRaises(TypeError):
+ with self.assertRaises(TypeError), self.assert_typeddict_deprecated():
TypedDict('Hi', x=())
- with self.assertRaises(TypeError):
+ with self.assertRaises(TypeError), self.assert_typeddict_deprecated():
TypedDict('Hi', [('x', int), ('y', ())])
with self.assertRaises(TypeError):
TypedDict('Hi', [('x', int)], y=int)
@@ -109,9 +125,20 @@ class TypedDictTests(BaseTestCase):
other = LabelPoint2D(x=0, y=1, label='hi') # noqa
self.assertEqual(other['label'], 'hi')
+ if PY36:
+ exec(dedent(
+ """
+ def test_py36_class_usage_emits_deprecations(self):
+ with self.assert_typeddict_deprecated():
+ class Foo(TypedDict):
+ bar: int
+ """
+ ))
+
def test_pickle(self):
global EmpD # pickle wants to reference the class by name
- EmpD = TypedDict('EmpD', name=str, id=int)
+ with self.assert_typeddict_deprecated():
+ EmpD = TypedDict('EmpD', name=str, id=int)
jane = EmpD({'name': 'jane', 'id': 37})
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
z = pickle.dumps(jane, proto)
@@ -123,13 +150,15 @@ class TypedDictTests(BaseTestCase):
self.assertEqual(EmpDnew({'name': 'jane', 'id': 37}), jane)
def test_optional(self):
- EmpD = TypedDict('EmpD', name=str, id=int)
+ with self.assert_typeddict_deprecated():
+ EmpD = TypedDict('EmpD', name=str, id=int)
self.assertEqual(typing.Optional[EmpD], typing.Union[None, EmpD])
self.assertNotEqual(typing.List[EmpD], typing.Tuple[EmpD])
def test_total(self):
- D = TypedDict('D', {'x': int}, total=False)
+ with self.assert_typeddict_deprecated():
+ D = TypedDict('D', {'x': int}, total=False)
self.assertEqual(D(), {})
self.assertEqual(D(x=1), {'x': 1})
self.assertEqual(D.__total__, False)
|
Deprecate `mypy_extensions.TypedDict`?
`mypy_extensions.TypedDict` is problematic for several reasons:
- `mypy_extensions.TypedDict` is missing quite a few features that have been added to `typing.TypedDict` at runtime (and have been backported to `typing_extensions.TypedDict`).
- I think the existence of both `mypy_extensions.TypedDict` and `typing_extensions.TypedDict` has the potential to be pretty confusing for users.
- It's quite annoying and error-prone that at typeshed, we have to remember to keep `typing._TypedDict`, `typing_extensions._TypedDict` and `mypy_extensions._TypedDict` all in sync. Unfortunately, we can't put them all in `_typeshed` and have all three modules import `_TypedDict` from `_typeshed`, as the three classes all have slightly subtle differences. (E.g. `mypy_extensions._TypedDict` doesn't have the `__required_keys__` and `__optional_keys__` ClassVars that both `typing._TypedDict` and `typing_extensions._TypedDict` have.)
- Mypy now maintains its own copy of the `mypy-extensions` stubs anyway, so its stubs for `mypy_extensions` are no longer automatically updated with each typeshed sync. That means that even if we update the stubs for `mypy_extensions.TypedDict` in typeshed (as we did in https://github.com/python/typeshed/pull/10565), those updates are no longer of any benefit to mypy users unless mypy maintainers remember to copy across the changes to their forked version of the `mypy_extensions` stubs.
I propose that we deprecate `mypy_extensions.TypedDict`, and steer people towards `typing_extensions.TypedDict` instead: it's up-to-date with the features on `typing.TypedDict`, it's much more comprehensively tested, and it has up-to-date stubs.
Thoughts?
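For illustration, the migration for users would mostly be an import swap (sketch, assuming `typing_extensions` is installed; on Python 3.8+ `typing.TypedDict` also works):
```
# Migration sketch: prefer typing_extensions (or typing on 3.8+) over mypy_extensions.
from typing_extensions import TypedDict  # instead of: from mypy_extensions import TypedDict

class Movie(TypedDict):
    title: str
    year: int

m: Movie = {"title": "Blade Runner", "year": 1982}
```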
|
0.0
|
a1b59f718e28c5dc897272211e0d00e0f58cc908
|
[
"tests/testextensions.py::TypedDictTests::test_basics_iterable_syntax",
"tests/testextensions.py::TypedDictTests::test_basics_keywords_syntax",
"tests/testextensions.py::TypedDictTests::test_optional",
"tests/testextensions.py::TypedDictTests::test_pickle",
"tests/testextensions.py::TypedDictTests::test_py36_class_usage_emits_deprecations",
"tests/testextensions.py::TypedDictTests::test_total",
"tests/testextensions.py::TypedDictTests::test_typeddict_errors"
] |
[
"tests/testextensions.py::TypedDictTests::test_py36_class_syntax_usage",
"tests/testextensions.py::MypycNativeIntTests::test_construction",
"tests/testextensions.py::MypycNativeIntTests::test_docstring",
"tests/testextensions.py::MypycNativeIntTests::test_isinstance"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-08-17 18:03:28+00:00
|
mit
| 5,118
|
|
ateliedocodigo__py-healthcheck-35
|
diff --git a/healthcheck/healthcheck.py b/healthcheck/healthcheck.py
index 25c8701..c8ae8d8 100644
--- a/healthcheck/healthcheck.py
+++ b/healthcheck/healthcheck.py
@@ -135,7 +135,10 @@ class HealthCheck(object):
# Reduce to 6 decimal points to have consistency with timestamp
elapsed_time = float('{:.6f}'.format(elapsed_time))
- if not passed:
+ if passed:
+ msg = 'Health check "{}" passed'.format(checker.__name__)
+ logger.debug(msg)
+ else:
msg = 'Health check "{}" failed with output "{}"'.format(checker.__name__, output)
logger.error(msg)
|
ateliedocodigo/py-healthcheck
|
e5b9643b1f6b5cc5ddf96e4c7d6f66920145cf69
|
diff --git a/tests/unit/test_healthcheck.py b/tests/unit/test_healthcheck.py
index bf0c7d1..7192434 100644
--- a/tests/unit/test_healthcheck.py
+++ b/tests/unit/test_healthcheck.py
@@ -31,7 +31,9 @@ class BasicHealthCheckTest(unittest.TestCase):
def test_success_check(self):
hc = HealthCheck(checkers=[self.check_that_works])
- message, status, headers = hc.run()
+ with self.assertLogs('healthcheck', level='DEBUG') as cm:
+ message, status, headers = hc.run()
+ self.assertEqual(cm.output, ['DEBUG:healthcheck.healthcheck:Health check "check_that_works" passed'])
self.assertEqual(200, status)
jr = json.loads(message)
self.assertEqual("success", jr["status"])
|
Add logger.debug if all health checks passed
As of now only [failures](https://github.com/ateliedocodigo/py-healthcheck/blob/e6205bdcc32099d12cda6eba172b4a801104448f/healthcheck/healthcheck.py#L140) and [exceptions](https://github.com/ateliedocodigo/py-healthcheck/blob/e6205bdcc32099d12cda6eba172b4a801104448f/healthcheck/healthcheck.py#L130) are logged.
Could we also log passing checks at a low log level, e.g. using `logger.debug`?
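The request amounts to pairing the existing error-level log with a debug-level one for the success path; a rough sketch of the pattern (not the project's actual code):
```
# Rough sketch of the requested pattern; not the project's actual code.
import logging

logger = logging.getLogger("healthcheck")

def report_check(name: str, passed: bool, output: str = "") -> None:
    if passed:
        logger.debug('Health check "%s" passed', name)
    else:
        logger.error('Health check "%s" failed with output "%s"', name, output)

logging.basicConfig(level=logging.DEBUG)
report_check("database", True)
report_check("cache", False, "connection refused")
```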
|
0.0
|
e5b9643b1f6b5cc5ddf96e4c7d6f66920145cf69
|
[
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_success_check"
] |
[
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_basic_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_function_failing_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_function_success_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_prevent_duplication",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_signature_function_failure_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_signature_function_success_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_signature_value_failing_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_signature_value_success_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_value_failing_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_custom_section_value_success_check",
"tests/unit/test_healthcheck.py::BasicHealthCheckTest::test_failing_check",
"tests/unit/test_healthcheck.py::TimeoutHealthCheckTest::test_default_timeout_should_success_check",
"tests/unit/test_healthcheck.py::TimeoutHealthCheckTest::test_error_timeout_function_should_failing_check"
] |
{
"failed_lite_validators": [
"has_short_problem_statement"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-05 11:07:05+00:00
|
mit
| 1,251
|
|
prefab-cloud__prefab-cloud-python-61
|
diff --git a/prefab_cloud_python/config_client.py b/prefab_cloud_python/config_client.py
index 7b25208..2d56c6e 100644
--- a/prefab_cloud_python/config_client.py
+++ b/prefab_cloud_python/config_client.py
@@ -55,6 +55,7 @@ class ConfigClient:
self.checkpoint_freq_secs = 60
self.config_loader = ConfigLoader(base_client)
self.config_resolver = ConfigResolver(base_client, self.config_loader)
+ self._cache_path = None
self.set_cache_path()
if self.options.is_local_only():
@@ -209,6 +210,8 @@ class ConfigClient:
def cache_configs(self, configs):
if not self.options.use_local_cache:
return
+ if not self.cache_path:
+ return
with open(self.cache_path, "w") as f:
f.write(MessageToJson(configs))
logger.debug(f"Cached configs to {self.cache_path}")
@@ -216,6 +219,8 @@ class ConfigClient:
def load_cache(self):
if not self.options.use_local_cache:
return False
+ if not self.cache_path:
+ return False
try:
with open(self.cache_path, "r") as f:
configs = Parse(f.read(), Prefab.Configs())
@@ -244,15 +249,19 @@ class ConfigClient:
logger.info(f"Unlocked config via {source}")
def set_cache_path(self):
- dir = os.environ.get(
- "XDG_CACHE_HOME", os.path.join(os.environ["HOME"], ".cache")
- )
- file_name = f"prefab.cache.{self.base_client.options.api_key_id}.json"
- self.cache_path = os.path.join(dir, file_name)
+ home_dir_cache_path = None
+ home_dir = os.environ.get("HOME")
+ if home_dir:
+ home_dir_cache_path = os.path.join(home_dir, ".cache")
+ cache_path = os.environ.get("XDG_CACHE_HOME", home_dir_cache_path)
+ if cache_path:
+ file_name = f"prefab.cache.{self.base_client.options.api_key_id}.json"
+ self.cache_path = os.path.join(cache_path, file_name)
@property
def cache_path(self):
- os.makedirs(os.path.dirname(self._cache_path), exist_ok=True)
+ if self._cache_path:
+ os.makedirs(os.path.dirname(self._cache_path), exist_ok=True)
return self._cache_path
@cache_path.setter
diff --git a/prefab_cloud_python/config_loader.py b/prefab_cloud_python/config_loader.py
index 7f942a5..f618e09 100644
--- a/prefab_cloud_python/config_loader.py
+++ b/prefab_cloud_python/config_loader.py
@@ -12,8 +12,8 @@ class ConfigLoader:
self.base_client = base_client
self.options = base_client.options
self.highwater_mark = 0
- self.classpath_config = self.__load_classpath_config()
- self.local_overrides = self.__load_local_overrides()
+ self.classpath_config = self.__load_classpath_config() or {}
+ self.local_overrides = self.__load_local_overrides() or {}
self.api_config = {}
def calc_config(self):
@@ -50,8 +50,9 @@ class ConfigLoader:
def __load_local_overrides(self):
if self.options.has_datafile():
return {}
- override_dir = self.options.prefab_config_override_dir
- return self.__load_config_from(override_dir)
+ if self.options.prefab_config_override_dir:
+ return self.__load_config_from(self.options.prefab_config_override_dir)
+ return {}
def __load_config_from(self, dir):
envs = self.options.prefab_envs
|
prefab-cloud/prefab-cloud-python
|
746b284749ee40ac8be9a85581f10eb9a913683d
|
diff --git a/tests/test_config_client.py b/tests/test_config_client.py
index b0e5842..3028724 100644
--- a/tests/test_config_client.py
+++ b/tests/test_config_client.py
@@ -1,5 +1,5 @@
from prefab_cloud_python import Options, Client
-from prefab_cloud_python.config_client import MissingDefaultException
+from prefab_cloud_python.config_client import MissingDefaultException, ConfigClient
import prefab_pb2 as Prefab
import pytest
import os
@@ -8,17 +8,61 @@ from contextlib import contextmanager
@contextmanager
-def extended_env(new_env_vars):
+def extended_env(new_env_vars, deleted_env_vars=[]):
old_env = os.environ.copy()
os.environ.update(new_env_vars)
+ for deleted_env_var in deleted_env_vars:
+ os.environ.pop(deleted_env_var, None)
yield
os.environ.clear()
os.environ.update(old_env)
+class ConfigClientFactoryFixture:
+ def __init__(self):
+ self.client = None
+
+ def create_config_client(self, options: Options) -> ConfigClient:
+ self.client = Client(options)
+ return self.client.config_client()
+
+ def close(self):
+ if self.client:
+ self.client.close()
+
+
[email protected]
+def config_client_factory():
+ factory_fixture = ConfigClientFactoryFixture()
+ yield factory_fixture
+ factory_fixture.close()
+
+
[email protected]
+def options():
+ def options(
+ on_no_default="RAISE",
+ x_use_local_cache=True,
+ prefab_envs=["unit_tests"],
+ api_key=None,
+ prefab_datasources="LOCAL_ONLY",
+ ):
+ return Options(
+ api_key=api_key,
+ prefab_config_classpath_dir="tests",
+ prefab_envs=prefab_envs,
+ prefab_datasources=prefab_datasources,
+ x_use_local_cache=x_use_local_cache,
+ on_no_default=on_no_default,
+ collect_sync_interval=None,
+ )
+
+ return options
+
+
class TestConfigClient:
- def test_get(self):
- config_client = self.build_config_client()
+ def test_get(self, config_client_factory, options):
+ config_client = config_client_factory.create_config_client(options())
assert config_client.get("sample") == "test sample value"
assert config_client.get("sample_int") == 123
@@ -26,13 +70,13 @@ class TestConfigClient:
assert config_client.get("sample_bool")
assert config_client.get("log-level.app") == Prefab.LogLevel.Value("ERROR")
- def test_get_with_default(self):
- config_client = self.build_config_client()
+ def test_get_with_default(self, config_client_factory, options):
+ config_client = config_client_factory.create_config_client(options())
assert config_client.get("bad key", "default value") == "default value"
- def test_get_without_default_raises(self):
- config_client = self.build_config_client()
+ def test_get_without_default_raises(self, config_client_factory, options):
+ config_client = config_client_factory.create_config_client(options())
with pytest.raises(MissingDefaultException) as exception:
config_client.get("bad key")
@@ -41,12 +85,16 @@ class TestConfigClient:
exception.value
)
- def test_get_without_default_returns_none_if_configured(self):
- config_client = self.build_config_client("RETURN_NONE")
+ def test_get_without_default_returns_none_if_configured(
+ self, config_client_factory, options
+ ):
+ config_client = config_client_factory.create_config_client(
+ options(on_no_default="RETURN_NONE")
+ )
assert config_client.get("bad key") is None
- def test_caching(self):
- config_client = self.build_config_client()
+ def test_caching(self, config_client_factory, options):
+ config_client = config_client_factory.create_config_client(options())
cached_config = Prefab.Configs(
configs=[
Prefab.Config(
@@ -72,39 +120,32 @@ class TestConfigClient:
config_client.load_cache()
assert config_client.get("test") == "test value"
- def test_cache_path(self):
- options = Options(
- api_key="123-API-KEY-SDK",
- x_use_local_cache=True,
- collect_sync_interval=None,
+ def test_cache_path(self, config_client_factory, options):
+ config_client = config_client_factory.create_config_client(
+ options(api_key="123-API-KEY-SDK", prefab_datasources="ALL")
)
- client = Client(options)
assert (
- client.config_client().cache_path
+ config_client.cache_path
== f"{os.environ['HOME']}/.cache/prefab.cache.123.json"
)
- def test_cache_path_local_only(self):
- config_client = self.build_config_client()
+ def test_cache_path_local_only(self, config_client_factory, options):
+ config_client = config_client_factory.create_config_client(
+ options(prefab_envs=[])
+ )
assert (
config_client.cache_path
== f"{os.environ['HOME']}/.cache/prefab.cache.local.json"
)
- def test_cache_path_respects_xdg(self):
+ def test_cache_path_local_only_with_no_home_dir_or_xdg(
+ self, config_client_factory, options
+ ):
+ with extended_env({}, deleted_env_vars=["HOME"]):
+ config_client = config_client_factory.create_config_client(options())
+ assert config_client.cache_path is None
+
+ def test_cache_path_respects_xdg(self, config_client_factory, options):
with extended_env({"XDG_CACHE_HOME": "/tmp"}):
- config_client = self.build_config_client()
+ config_client = config_client_factory.create_config_client(options())
assert config_client.cache_path == "/tmp/prefab.cache.local.json"
-
- @staticmethod
- def build_config_client(on_no_default="RAISE"):
- options = Options(
- prefab_config_classpath_dir="tests",
- prefab_envs="unit_tests",
- prefab_datasources="LOCAL_ONLY",
- x_use_local_cache=True,
- on_no_default=on_no_default,
- collect_sync_interval=None,
- )
- client = Client(options)
- return client.config_client()
|
Does not work great in AWS lambda (or any HOME-less environment) currently
Hey there,
we are trying to use prefab-cloud-python in an AWS Lambda, and since those usually don't have a HOME directory defined, the following lines throw errors:
https://github.com/prefab-cloud/prefab-cloud-python/blob/746b284749ee40ac8be9a85581f10eb9a913683d/prefab_cloud_python/options.py#L59
https://github.com/prefab-cloud/prefab-cloud-python/blob/746b284749ee40ac8be9a85581f10eb9a913683d/prefab_cloud_python/config_client.py#L248
The latter fails even when an XDG_CACHE_HOME variable is set in the environment, since .get() is not used when trying to access HOME.
Our workaround is to set a HOME environment variable that points to /tmp for the Lambdas, but I'm wondering whether relying on HOME being present in a server environment (serverless or not) is such a great idea.
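For illustration, a minimal sketch (not the library's actual code; the helper name is made up) of a cache-directory lookup that does not assume HOME exists and simply skips local caching when neither variable is set:
```python
import os


def resolve_cache_dir():
    """Return a usable cache directory, or None when neither
    XDG_CACHE_HOME nor HOME is set (e.g. in AWS Lambda)."""
    xdg_cache = os.environ.get("XDG_CACHE_HOME")
    if xdg_cache:
        return xdg_cache
    home = os.environ.get("HOME")
    if home:
        return os.path.join(home, ".cache")
    # No cache location available: callers should skip local caching.
    return None
```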
|
0.0
|
746b284749ee40ac8be9a85581f10eb9a913683d
|
[
"tests/test_config_client.py::TestConfigClient::test_get",
"tests/test_config_client.py::TestConfigClient::test_get_with_default",
"tests/test_config_client.py::TestConfigClient::test_get_without_default_raises",
"tests/test_config_client.py::TestConfigClient::test_get_without_default_returns_none_if_configured",
"tests/test_config_client.py::TestConfigClient::test_cache_path_local_only_with_no_home_dir_or_xdg",
"tests/test_config_client.py::TestConfigClient::test_cache_path_respects_xdg"
] |
[] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2024-02-12 21:25:08+00:00
|
mit
| 4,678
|
|
mrf345__flask_minify-80
|
diff --git a/Makefile b/Makefile
index 608c314..1227923 100644
--- a/Makefile
+++ b/Makefile
@@ -6,9 +6,9 @@ c ?= 1
test: install
test -f .venv/bin/activate && source .venv/bin/activate && python -m bandit -c bandit.yml -r . && python -m pytest --count=$(c)
lint: install
- source .venv/bin/activate && python -m isort --profile black --check . && python -m black --check .
+ source .venv/bin/activate && python -m isort -sg "**/.venv*" --profile black --check . && python -m black --check .
format: install
- test -f .venv/bin/activate && source .venv/bin/activate && python -m isort --profile black . && python -m black .
+ test -f .venv/bin/activate && source .venv/bin/activate && python -m isort -sg "**/.venv*" --profile black . && python -m black .
run: install
python tests/integration.py
release: install-dev clean
diff --git a/README.md b/README.md
index 1d17a44..29fdd1b 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,10 @@ the **default** parsers are set to `{"html": Html, "script": Jsmin, "style": Rcs
## Breaking changes
+#### `0.40`
+
+Due to a future deprecation in Flask 2.3, the extension is no longer going to fallback to `Flask._app_ctx_stack`, it will raise an exception instead (`flask_minify.exceptions.MissingApp`)
+
#### `0.33`
introduces a breaking change to the expected output, in this release `lesscpy` will be replaced by `cssmin` as
diff --git a/flask_minify/about.py b/flask_minify/about.py
index 7071428..ef4cfb0 100644
--- a/flask_minify/about.py
+++ b/flask_minify/about.py
@@ -1,4 +1,4 @@
-__version__ = "0.39"
+__version__ = "0.40"
__doc__ = "Flask extension to minify html, css, js and less."
__license__ = "MIT"
__author__ = "Mohamed Feddad"
diff --git a/flask_minify/exceptions.py b/flask_minify/exceptions.py
new file mode 100644
index 0000000..3c99cd0
--- /dev/null
+++ b/flask_minify/exceptions.py
@@ -0,0 +1,10 @@
+class FlaskMinifyException(Exception):
+ """FlaskMinify base exception"""
+
+ pass
+
+
+class MissingApp(FlaskMinifyException):
+ """Raised when the flask app is accessed before it's set"""
+
+ pass
diff --git a/flask_minify/main.py b/flask_minify/main.py
index 9dba299..8fc30fe 100644
--- a/flask_minify/main.py
+++ b/flask_minify/main.py
@@ -1,9 +1,10 @@
from itertools import tee
from re import compile as compile_re
-from flask import _app_ctx_stack, request
+from flask import request
from flask_minify.cache import MemoryCache
+from flask_minify.exceptions import MissingApp
from flask_minify.parsers import Parser
from flask_minify.utils import does_content_type_match
@@ -112,14 +113,24 @@ class Minify:
@property
def app(self):
- """If app was passed take it, if not get the one on top.
+ """If app was passed take it, otherwise raise an exception.
Returns
-------
Flask App
The current Flask application.
+
+ Raises
+ ------
+ MissingApp
"""
- return self._app or (_app_ctx_stack.top and _app_ctx_stack.top.app)
+ if not self._app:
+ raise MissingApp(
+ "Flask app has not been passed to the extension `Minify(app=None)`, "
+ "nor lazy initialized with `.init_app(app)`"
+ )
+
+ return self._app
def init_app(self, app):
"""Handle initiation of multiple apps NOTE:Factory Method"""
|
mrf345/flask_minify
|
4b48318cb3a6eac55b76f59d197dc8c895b00392
|
diff --git a/tests/units.py b/tests/units.py
index ddd1aa5..97606da 100644
--- a/tests/units.py
+++ b/tests/units.py
@@ -1,8 +1,10 @@
-from random import randint
from unittest import mock
+import pytest
+
from flask_minify import minify, parsers
from flask_minify.cache import MemoryCache
+from flask_minify.exceptions import MissingApp
from flask_minify.utils import does_content_type_match, is_empty
from .constants import (
@@ -87,6 +89,14 @@ class TestMinifyRequest:
assert (list(matches), exists) == ([], False)
+ def test_access_missing_app_raises_exception(self):
+ """test accessing a missing flask app raises an exception"""
+ self.mock_app = None
+ ext = self.minify_defaults
+
+ with pytest.raises(MissingApp):
+ ext.app
+
class TestParsers:
def test_css_edge_cases_with_rcssmin(self):
|
DeprecationWarning: `_app_ctx_stack` is deprecated and will be removed in Flask `2.3`. Use `g` to store data, or `app_ctx` to access the current context.
Please address this deprecation warning in the next release.
```
/venv/lib/python3.9/site-packages/flask_minify/main.py:122: DeprecationWarning: '_app_ctx_stack' is deprecated and will be removed in Flask 2.3. Use 'g' to store data, or 'app_ctx' to access the current context.
return self._app or (_app_ctx_stack.top and _app_ctx_stack.top.app)
```
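A minimal sketch of the direction such a fix can take (the class and error message below are illustrative, not the extension's real code): stop falling back to the deprecated stack and instead require the app to be passed explicitly or via `init_app`.
```python
from typing import Optional

from flask import Flask


class MinifyLike:
    """Illustrative stand-in for the extension, not its real implementation."""

    def __init__(self, app: Optional[Flask] = None):
        self._app = app

    def init_app(self, app: Flask) -> None:
        self._app = app

    @property
    def app(self) -> Flask:
        # Fail loudly instead of reaching into the deprecated
        # flask._app_ctx_stack fallback.
        if self._app is None:
            raise RuntimeError(
                "Flask app was not passed to the extension and init_app() was never called"
            )
        return self._app
```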
|
0.0
|
4b48318cb3a6eac55b76f59d197dc8c895b00392
|
[
"tests/units.py::TestUtils::test_is_empty",
"tests/units.py::TestUtils::test_is_html",
"tests/units.py::TestUtils::test_is_js",
"tests/units.py::TestUtils::test_is_cssless",
"tests/units.py::TestParsers::test_css_edge_cases_with_rcssmin",
"tests/units.py::TestParsers::test_overriding_parser_options"
] |
[] |
{
"failed_lite_validators": [
"has_added_files",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-10-02 20:04:41+00:00
|
mit
| 4,063
|
|
m0nhawk__grafana_api-43
|
diff --git a/grafana_api/grafana_api.py b/grafana_api/grafana_api.py
index e965768..bb7cc5e 100644
--- a/grafana_api/grafana_api.py
+++ b/grafana_api/grafana_api.py
@@ -56,9 +56,11 @@ class GrafanaAPI:
url_path_prefix="",
protocol="http",
verify=True,
+ timeout=5.0,
):
self.auth = auth
self.verify = verify
+ self.timeout = timeout
self.url_host = host
self.url_port = port
self.url_path_prefix = url_path_prefix
@@ -92,7 +94,7 @@ class GrafanaAPI:
__url = "%s%s" % (self.url, url)
runner = getattr(self.s, item.lower())
r = runner(
- __url, json=json, headers=headers, auth=self.auth, verify=self.verify
+ __url, json=json, headers=headers, auth=self.auth, verify=self.verify, timeout=self.timeout
)
if 500 <= r.status_code < 600:
raise GrafanaServerError(
diff --git a/grafana_api/grafana_face.py b/grafana_api/grafana_face.py
index dcc8667..f9fe53b 100644
--- a/grafana_api/grafana_face.py
+++ b/grafana_api/grafana_face.py
@@ -24,6 +24,7 @@ class GrafanaFace:
url_path_prefix="",
protocol="http",
verify=True,
+ timeout=5.0,
):
self.api = GrafanaAPI(
auth,
@@ -32,6 +33,7 @@ class GrafanaFace:
url_path_prefix=url_path_prefix,
protocol=protocol,
verify=verify,
+ timeout=timeout,
)
self.admin = Admin(self.api)
self.dashboard = Dashboard(self.api)
|
m0nhawk/grafana_api
|
b5f1266273fb836580b03224456843b043089814
|
diff --git a/test/test_grafana.py b/test/test_grafana.py
index d4affad..1c8ede3 100644
--- a/test/test_grafana.py
+++ b/test/test_grafana.py
@@ -67,8 +67,22 @@ class TestGrafanaAPI(unittest.TestCase):
headers=None,
json=None,
verify=False,
+ timeout=5.0,
)
+ def test_grafana_api_timeout(self):
+ cli = GrafanaFace(
+ ("admin", "admin"),
+ host="play.grafana.org",
+ url_path_prefix="",
+ protocol="https",
+ verify=False,
+ timeout=0.0001
+ )
+
+ with self.assertRaises(requests.exceptions.Timeout):
+ cli.folder.get_all_folders()
+
def test_grafana_api_basic_auth(self):
cli = GrafanaFace(
("admin", "admin"), host="localhost", url_path_prefix="", protocol="https",port="3000"
|
Missing timeouts
**Describe the bug**
The requests never time out, which is not a good idea in general.
**Expected behavior**
The user should be able to set one, and there should be a default (maybe 10s).
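A minimal sketch of the requested behaviour (the helper below is illustrative, not the library's API): every call into requests carries an explicit timeout, with a default the caller can override.
```python
import requests


def get_all_folders(host, auth, timeout=10.0):
    """Hypothetical helper showing the idea: no request may hang forever."""
    url = f"https://{host}/api/folders"
    response = requests.get(url, auth=auth, timeout=timeout, verify=True)
    response.raise_for_status()
    return response.json()
```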
|
0.0
|
b5f1266273fb836580b03224456843b043089814
|
[
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_no_verify",
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_timeout"
] |
[
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api",
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_basic_auth",
"test/test_grafana.py::TestGrafanaAPI::test_grafana_api_token_auth"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks",
"has_pytest_match_arg"
],
"has_test_patch": true,
"is_lite": false
}
|
2019-09-09 02:16:17+00:00
|
mit
| 3,662
|
|
web2py__pydal-349
|
diff --git a/pydal/dialects/base.py b/pydal/dialects/base.py
index 89e261f9..fb058fe2 100644
--- a/pydal/dialects/base.py
+++ b/pydal/dialects/base.py
@@ -399,7 +399,8 @@ class SQLDialect(CommonDialect):
return ''
def coalesce(self, first, second):
- expressions = [self.expand(first)]+[self.expand(e) for e in second]
+ expressions = [self.expand(first)] + \
+ [self.expand(val, first.type) for val in second]
return 'COALESCE(%s)' % ','.join(expressions)
def raw(self, val):
diff --git a/pydal/dialects/sqlite.py b/pydal/dialects/sqlite.py
index 0af56176..078d5c1a 100644
--- a/pydal/dialects/sqlite.py
+++ b/pydal/dialects/sqlite.py
@@ -28,6 +28,15 @@ class SQLiteDialect(SQLDialect):
return '(%s REGEXP %s)' % (
self.expand(first), self.expand(second, 'string'))
+ def select(self, fields, tables, where=None, groupby=None, having=None,
+ orderby=None, limitby=None, distinct=False, for_update=False):
+ if distinct and distinct is not True:
+ raise SyntaxError(
+ 'DISTINCT ON is not supported by SQLite')
+ return super(SQLiteDialect, self).select(
+ fields, tables, where, groupby, having, orderby, limitby, distinct,
+ for_update)
+
def truncate(self, table, mode=''):
tablename = table._tablename
return [
diff --git a/pydal/objects.py b/pydal/objects.py
index c159c181..59d6f471 100644
--- a/pydal/objects.py
+++ b/pydal/objects.py
@@ -1462,6 +1462,8 @@ class Field(Expression, Serializable):
return field
def store(self, file, filename=None, path=None):
+ # make sure filename is a str sequence
+ filename = "{}".format(filename)
if self.custom_store:
return self.custom_store(file, filename, path)
if isinstance(file, cgi.FieldStorage):
@@ -1474,7 +1476,8 @@ class Field(Expression, Serializable):
m = REGEX_STORE_PATTERN.search(filename)
extension = m and m.group('e') or 'txt'
uuid_key = self._db.uuid().replace('-', '')[-16:]
- encoded_filename = base64.b16encode(filename).lower()
+ encoded_filename = base64.b16encode(
+ filename.encode('utf-8')).lower().decode('utf-8')
newfilename = '%s.%s.%s.%s' % (
self._tablename, self.name, uuid_key, encoded_filename)
newfilename = newfilename[:(self.length - 1 - len(extension))] + \
@@ -1486,27 +1489,27 @@ class Field(Expression, Serializable):
blob_uploadfield_name: file.read()}
self_uploadfield.table.insert(**keys)
elif self_uploadfield is True:
- if path:
- pass
- elif self.uploadfolder:
- path = self.uploadfolder
- elif self.db._adapter.folder:
- path = pjoin(self.db._adapter.folder, '..', 'uploads')
- else:
- raise RuntimeError(
- "you must specify a Field(..., uploadfolder=...)")
- if self.uploadseparate:
- if self.uploadfs:
- raise RuntimeError("not supported")
- path = pjoin(path, "%s.%s" % (
- self._tablename, self.name), uuid_key[:2]
- )
- if not exists(path):
- os.makedirs(path)
- pathfilename = pjoin(path, newfilename)
if self.uploadfs:
dest_file = self.uploadfs.open(newfilename, 'wb')
else:
+ if path:
+ pass
+ elif self.uploadfolder:
+ path = self.uploadfolder
+ elif self.db._adapter.folder:
+ path = pjoin(self.db._adapter.folder, '..', 'uploads')
+ else:
+ raise RuntimeError(
+ "you must specify a Field(..., uploadfolder=...)")
+ if self.uploadseparate:
+ if self.uploadfs:
+ raise RuntimeError("not supported")
+ path = pjoin(path, "%s.%s" % (
+ self._tablename, self.name), uuid_key[:2]
+ )
+ if not exists(path):
+ os.makedirs(path)
+ pathfilename = pjoin(path, newfilename)
dest_file = open(pathfilename, 'wb')
try:
shutil.copyfileobj(file, dest_file)
@@ -1563,7 +1566,7 @@ class Field(Expression, Serializable):
return self.custom_retrieve_file_properties(name, path)
if m.group('name'):
try:
- filename = base64.b16decode(m.group('name'), True)
+ filename = base64.b16decode(m.group('name'), True).decode('utf-8')
filename = REGEX_CLEANUP_FN.sub('_', filename)
except (TypeError, AttributeError):
filename = name
diff --git a/setup.py b/setup.py
index f99ee9af..d4d69a06 100644
--- a/setup.py
+++ b/setup.py
@@ -38,7 +38,9 @@ setup(
maintainer_email='[email protected]',
description='a pure Python Database Abstraction Layer',
long_description=__doc__,
- packages=['pydal', 'pydal.adapters', 'pydal.helpers', 'pydal.contrib'],
+ packages=[
+ 'pydal', 'pydal.adapters', 'pydal.dialects', 'pydal.helpers',
+ 'pydal.parsers', 'pydal.representers', 'pydal.contrib'],
include_package_data=True,
zip_safe=False,
platforms='any',
|
web2py/pydal
|
d59b588900f26e6e204fb119115efa91fe7db692
|
diff --git a/tests/sql.py b/tests/sql.py
index 2573f3a3..c118e372 100644
--- a/tests/sql.py
+++ b/tests/sql.py
@@ -149,6 +149,74 @@ class TestFields(unittest.TestCase):
else:
isinstance(f.formatter(datetime.datetime.now()), str)
+ def testUploadField(self):
+ import tempfile
+
+ stream = tempfile.NamedTemporaryFile()
+ content = b"this is the stream content"
+ stream.write(content)
+ # rewind before inserting
+ stream.seek(0)
+
+
+ db = DAL(DEFAULT_URI, check_reserved=['all'])
+ db.define_table('tt', Field('fileobj', 'upload',
+ uploadfolder=tempfile.gettempdir(),
+ autodelete=True))
+ f_id = db.tt.insert(fileobj=stream)
+
+ row = db.tt[f_id]
+ (retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj)
+
+ # name should be the same
+ self.assertEqual(retr_name, os.path.basename(stream.name))
+ # content should be the same
+ retr_content = retr_stream.read()
+ self.assertEqual(retr_content, content)
+
+ # close streams!
+ retr_stream.close()
+
+ # delete
+ row.delete_record()
+
+ # drop
+ db.tt.drop()
+
+ # this part is triggered only if fs (AKA pyfilesystem) module is installed
+ try:
+ from fs.memoryfs import MemoryFS
+
+ # rewind before inserting
+ stream.seek(0)
+ db.define_table('tt', Field('fileobj', 'upload',
+ uploadfs=MemoryFS(),
+ autodelete=True))
+
+ f_id = db.tt.insert(fileobj=stream)
+
+ row = db.tt[f_id]
+ (retr_name, retr_stream) = db.tt.fileobj.retrieve(row.fileobj)
+
+ # name should be the same
+ self.assertEqual(retr_name, os.path.basename(stream.name))
+ # content should be the same
+ retr_content = retr_stream.read()
+ self.assertEqual(retr_content, content)
+
+ # close streams
+ retr_stream.close()
+ stream.close()
+
+ # delete
+ row.delete_record()
+
+ # drop
+ db.tt.drop()
+
+ except ImportError:
+ pass
+
def testRun(self):
"""Test all field types and their return values"""
db = DAL(DEFAULT_URI, check_reserved=['all'])
|
coalesce() incorrectly expands constant values
When you pass a string constant into coalesce, it is expanded as an identifier instead of a string constant:
`db().select(db.table.str_field.coalesce('foo'))`
would expand into this:
`SELECT COALESCE(table.str_field,foo) FROM table`
but the expected behavior is this:
`SELECT COALESCE(table.str_field,'foo') FROM table`
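As a rough illustration of the expected expansion (a standalone sketch, not pydal's dialect code), constants should be rendered according to the first argument's type, so string values come out quoted:
```python
def coalesce_sql(column, values, column_type="string"):
    """Sketch of the expected COALESCE expansion for a string column."""
    def expand(value):
        if column_type == "string" and isinstance(value, str):
            return "'{}'".format(value.replace("'", "''"))
        return str(value)

    return "COALESCE({})".format(",".join([column] + [expand(v) for v in values]))


print(coalesce_sql("table.str_field", ["foo"]))
# COALESCE(table.str_field,'foo')
```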
|
0.0
|
d59b588900f26e6e204fb119115efa91fe7db692
|
[
"tests/sql.py::TestFields::testUploadField"
] |
[
"tests/sql.py::TestFields::testFieldFormatters",
"tests/sql.py::TestFields::testFieldLabels",
"tests/sql.py::TestFields::testFieldName",
"tests/sql.py::TestFields::testFieldTypes",
"tests/sql.py::TestFields::testRun",
"tests/sql.py::TestTables::testTableNames",
"tests/sql.py::TestAll::testSQLALL",
"tests/sql.py::TestTable::testTableAlias",
"tests/sql.py::TestTable::testTableCreation",
"tests/sql.py::TestTable::testTableInheritance",
"tests/sql.py::TestInsert::testRun",
"tests/sql.py::TestSelect::testCoalesce",
"tests/sql.py::TestSelect::testGroupByAndDistinct",
"tests/sql.py::TestSelect::testListInteger",
"tests/sql.py::TestSelect::testListReference",
"tests/sql.py::TestSelect::testListString",
"tests/sql.py::TestSelect::testRun",
"tests/sql.py::TestSelect::testTestQuery",
"tests/sql.py::TestAddMethod::testRun",
"tests/sql.py::TestBelongs::testRun",
"tests/sql.py::TestContains::testRun",
"tests/sql.py::TestLike::testEscaping",
"tests/sql.py::TestLike::testLikeInteger",
"tests/sql.py::TestLike::testRegexp",
"tests/sql.py::TestLike::testRun",
"tests/sql.py::TestLike::testStartsEndsWith",
"tests/sql.py::TestLike::testUpperLower",
"tests/sql.py::TestDatetime::testRun",
"tests/sql.py::TestExpressions::testOps",
"tests/sql.py::TestExpressions::testRun",
"tests/sql.py::TestExpressions::testSubstring",
"tests/sql.py::TestExpressions::testUpdate",
"tests/sql.py::TestJoin::testRun",
"tests/sql.py::TestMinMaxSumAvg::testRun",
"tests/sql.py::TestMigrations::testRun",
"tests/sql.py::TestReference::testRun",
"tests/sql.py::TestClientLevelOps::testRun",
"tests/sql.py::TestVirtualFields::testRun",
"tests/sql.py::TestComputedFields::testRun",
"tests/sql.py::TestCommonFilters::testRun",
"tests/sql.py::TestImportExportFields::testRun",
"tests/sql.py::TestImportExportUuidFields::testRun",
"tests/sql.py::TestDALDictImportExport::testRun",
"tests/sql.py::TestSelectAsDict::testSelect",
"tests/sql.py::TestRNameTable::testJoin",
"tests/sql.py::TestRNameTable::testSelect",
"tests/sql.py::TestRNameFields::testInsert",
"tests/sql.py::TestRNameFields::testJoin",
"tests/sql.py::TestRNameFields::testRun",
"tests/sql.py::TestRNameFields::testSelect",
"tests/sql.py::TestQuoting::testCase",
"tests/sql.py::TestQuoting::testPKFK",
"tests/sql.py::TestTableAndFieldCase::testme",
"tests/sql.py::TestQuotesByDefault::testme",
"tests/sql.py::TestGis::testGeometry",
"tests/sql.py::TestGis::testGeometryCase",
"tests/sql.py::TestGis::testGisMigration",
"tests/sql.py::TestSQLCustomType::testRun",
"tests/sql.py::TestLazy::testLazyGetter",
"tests/sql.py::TestLazy::testRowExtra",
"tests/sql.py::TestLazy::testRowNone",
"tests/sql.py::TestLazy::testRun",
"tests/sql.py::TestRedefine::testRun",
"tests/sql.py::TestUpdateInsert::testRun",
"tests/sql.py::TestBulkInsert::testRun",
"tests/sql.py::TestRecordVersioning::testRun",
"tests/sql.py::TestSerializers::testAsJson"
] |
{
"failed_lite_validators": [
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2016-04-21 22:23:30+00:00
|
bsd-3-clause
| 6,237
|
|
lapix-ufsc__lapixdl-43
|
diff --git a/lapixdl/formats/annotation.py b/lapixdl/formats/annotation.py
index 919f70a..a0e2eb1 100644
--- a/lapixdl/formats/annotation.py
+++ b/lapixdl/formats/annotation.py
@@ -26,6 +26,16 @@ class BBox:
cls: int
score: float | None = None
+ def __post_init__(self):
+ if self.upper_left_x < 0 or self.upper_left_y < 0:
+ raise ValueError(f'The upper left (x, y) should be positive values. Got ({self.upper_left_x}, {self.upper_left_y})')
+
+ if self.width <= 0:
+ raise ValueError(f'The width should be bigger than zero. Got {self.width}')
+
+ if self.height <= 0:
+ raise ValueError(f'The height should be bigger than zero. Got {self.height}')
+
@property
def upper_left_point(self) -> tuple[int, int]:
"""Tuple[int, int]: (X,Y) of the upper left point of the Bounding Box."""
|
lapix-ufsc/lapixdl
|
354ad05a93680744e0c0d3a9345fc05d23ca6f79
|
diff --git a/tests/formats/annotation_test.py b/tests/formats/annotation_test.py
index aa25429..202ba3a 100644
--- a/tests/formats/annotation_test.py
+++ b/tests/formats/annotation_test.py
@@ -28,6 +28,20 @@ def test_bbox():
assert bbox.slice_y == slice(0, 14)
+def test_invalid_bbox():
+ with pytest.raises(ValueError):
+ BBox(0, -1, 1, 1, 0)
+
+ with pytest.raises(ValueError):
+ BBox(-1, 0, 1, 1, 0)
+
+ with pytest.raises(ValueError):
+ BBox(0, 0, 0, 1, 0)
+
+ with pytest.raises(ValueError):
+ BBox(0, 0, 1, 0, 0)
+
+
def test_bbox_intersection_and_union_area_with():
bbox_A = BBox(0, 0, 10, 15, 0)
bbox_B = BBox(5, 5, 20, 25, 0)
|
Division by zero at calculate_bbox_iou
I can't reproduce the error with tests, but it sometimes happens at https://github.com/lapix-ufsc/lapixdl/blob/ee3faf20b2beab7bbb794328f724b7b8044ac1b1/lapixdl/evaluation/evaluate.py#L211
The union is equal to zero and raises the error. To fix it, we just need:
```python
if union_area == 0:
    return 0
```
I haven't opened a PR for this because I can't reproduce it when I try.
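A minimal sketch of the suggested guard in context (the function name is illustrative, not the library's code):
```python
def safe_bbox_iou(intersection_area: float, union_area: float) -> float:
    """Return intersection over union, treating an empty union as zero IoU."""
    if union_area == 0:
        return 0.0
    return intersection_area / union_area
```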
|
0.0
|
354ad05a93680744e0c0d3a9345fc05d23ca6f79
|
[
"tests/formats/annotation_test.py::test_invalid_bbox"
] |
[
"tests/formats/annotation_test.py::test_bbox",
"tests/formats/annotation_test.py::test_bbox_intersection_and_union_area_with",
"tests/formats/annotation_test.py::test_bbox_to_polygon",
"tests/formats/annotation_test.py::test_bounds_to_bbox",
"tests/formats/annotation_test.py::test_annotation_bbox",
"tests/formats/annotation_test.py::test_annotation_geo_type",
"tests/formats/annotation_test.py::test_annotation_xywh_bbox",
"tests/formats/annotation_test.py::test_annotation_iter",
"tests/formats/annotation_test.py::test_annotation_iter_wrong_geo",
"tests/formats/annotation_test.py::test_annotation_copy"
] |
{
"failed_lite_validators": [],
"has_test_patch": true,
"is_lite": true
}
|
2022-10-26 13:21:23+00:00
|
mit
| 3,502
|
|
Clinical-Genomics__scout-656
|
diff --git a/scout/build/panel.py b/scout/build/panel.py
index 1a4dfa0fd..85b02bfef 100644
--- a/scout/build/panel.py
+++ b/scout/build/panel.py
@@ -44,7 +44,7 @@ def build_gene(gene_info, adapter):
hgnc_gene = adapter.hgnc_gene(hgnc_id)
if hgnc_gene is None:
- raise IntegrityError
+ raise IntegrityError("hgnc_id {0} is not in the gene database!".format(hgnc_id))
gene_obj['symbol'] = hgnc_gene['hgnc_symbol']
diff --git a/scout/commands/load/panel.py b/scout/commands/load/panel.py
index 1aa029868..0569e2e13 100644
--- a/scout/commands/load/panel.py
+++ b/scout/commands/load/panel.py
@@ -74,5 +74,5 @@ def panel(context, date, name, version, panel_type, panel_id, path, institute):
try:
adapter.load_panel(info)
except IntegrityError as e:
- logger.warning(e)
+ log.warning(e)
context.abort()
diff --git a/scout/parse/variant/coordinates.py b/scout/parse/variant/coordinates.py
index fa24acf8c..5031d6b0a 100644
--- a/scout/parse/variant/coordinates.py
+++ b/scout/parse/variant/coordinates.py
@@ -19,7 +19,20 @@ def get_cytoband_coordinates(chrom, pos):
return coordinate
def get_sub_category(alt_len, ref_len, category, svtype=None):
- """Get the subcategory"""
+ """Get the subcategory for a VCF variant
+
+ The sub categories are:
+ 'snv', 'indel', 'del', 'ins', 'dup', 'bnd', 'inv'
+
+ Args:
+ alt_len(int)
+ ref_len(int)
+ category(str)
+ svtype(str)
+
+ Returns:
+ subcategory(str)
+ """
subcategory = ''
if category in ('snv', 'indel', 'cancer'):
@@ -32,99 +45,147 @@ def get_sub_category(alt_len, ref_len, category, svtype=None):
return subcategory
-def get_length(alt_len, ref_len, category, svtype=None, svlen=None):
- """docstring for get_length"""
+def get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None):
+ """Return the length of a variant
+
+ Args:
+ alt_len(int)
+ ref_len(int)
+ category(str)
+ svtype(str)
+ svlen(int)
+ """
+ # -1 would indicate uncertain length
+ length = -1
if category in ('snv', 'indel', 'cancer'):
if ref_len == alt_len:
length = alt_len
else:
- length = abs(ref_len-alt_len)
+ length = abs(ref_len - alt_len)
+
elif category == 'sv':
if svtype == 'bnd':
length = int(10e10)
else:
if svlen:
length = abs(int(svlen))
- else:
- # -1 would indicate uncertain length
- length = -1
+ # Some software does not give a length but they give END
+ elif end:
+ if end != pos:
+ length = end - pos
return length
-def get_end(pos, length, alt, category, svtype=None):
- """docstring for get_length"""
- end = None
+def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
+ """Return the end coordinate for a variant
+
+ Args:
+ pos(int)
+ alt(str)
+ category(str)
+ snvend(str)
+ svend(int)
+ svlen(int)
+
+ Returns:
+ end(int)
+ """
+ # If nothing is known we set end to be same as start
+ end = pos
+ # If variant is snv or indel we know that cyvcf2 can handle end pos
if category in ('snv', 'indel', 'cancer'):
- end = pos + length
+ end = snvend
+ # With SVs we have to be a bit more careful
elif category == 'sv':
+ # The END field from INFO usually works fine
+ end = svend
+
+ # For some cases like insertions the callers set end to same as pos
+ # In those cases we can hope that there is a svlen...
+ if svend == pos:
+ if svlen:
+ end = pos + svlen
+ # If variant is 'BND' they have ':' in alt field
+ # Information about other end is in the alt field
if ':' in alt:
other_coordinates = alt.strip('ACGTN[]').split(':')
# For BND end will represent the end position of the other end
try:
end = int(other_coordinates[1])
except ValueError as err:
- end = pos + length
- else:
- end = pos + length
-
- return end
+ pass
+ return end
-def parse_coordinates(chrom, ref, alt, position, category, svtype, svlen, end, mate_id=None):
+def parse_coordinates(variant, category):
"""Find out the coordinates for a variant
Args:
- chrom(str)
- ref(str)
- alt(str)
- position(int)
- category(str)
- svtype(str)
- svlen(int)
- end(int)
- mate_id(str)
+ variant(cyvcf2.Variant)
Returns:
coordinates(dict): A dictionary on the form:
{
+ 'position':<int>,
'end':<int>,
+ 'end_chrom':<str>,
'length':<int>,
'sub_category':<str>,
'mate_id':<str>,
+ 'cytoband_start':<str>,
+ 'cytoband_end':<str>,
}
"""
- coordinates = {
- 'end': end,
- 'length': None,
- 'sub_category': None,
- 'mate_id':None,
- 'cytoband_start':None,
- 'cytoband_end':None,
- 'end_chrom':None,
- }
+ ref = variant.REF
+ alt = variant.ALT[0]
+ chrom = variant.CHROM
+ if (chrom.startswith('chr') or chrom.startswith('CHR')):
+ chrom = chrom[3:]
+
+ svtype = variant.INFO.get('SVTYPE')
if svtype:
svtype = svtype.lower()
+ mate_id = variant.INFO.get('MATEID')
+
+ svlen = variant.INFO.get('SVLEN')
+
+ svend = variant.INFO.get('END')
+ snvend = int(variant.end)
+
+ position = int(variant.POS)
+
ref_len = len(ref)
alt_len = len(alt)
- coordinates['mate_id'] = mate_id
- coordinates['sub_category'] = get_sub_category(alt_len, ref_len, category, svtype)
- coordinates['length'] = get_length(alt_len, ref_len, category, svtype, svlen)
- coordinates['end'] = get_end(position, coordinates['length'], alt, category, svtype)
- coordinates['end_chrom'] = chrom
-
- if coordinates['sub_category'] == 'bnd':
- if ':' in alt:
- other_coordinates = alt.strip('ACGTN[]').split(':')
- # BND will often be translocations between different chromosomes
- other_chrom = other_coordinates[0]
- coordinates['end_chrom'] = other_coordinates[0].lstrip('chrCHR')
-
- coordinates['cytoband_start'] = get_cytoband_coordinates(
- chrom, position
- )
- coordinates['cytoband_end'] = get_cytoband_coordinates(
- coordinates['end_chrom'], coordinates['end']
- )
+
+ sub_category = get_sub_category(alt_len, ref_len, category, svtype)
+ end = get_end(position, alt, category, snvend, svend)
+
+ length = get_length(alt_len, ref_len, category, position, end, svtype, svlen)
+ end_chrom = chrom
+
+ if sub_category == 'bnd':
+ if ':' in alt:
+ other_coordinates = alt.strip('ACGTN[]').split(':')
+ # BND will often be translocations between different chromosomes
+ other_chrom = other_coordinates[0]
+ if (other_chrom.startswith('chr') or other_chrom.startswith('CHR')):
+ other_chrom = other_chrom[3:]
+ end_chrom = other_chrom
+
+ cytoband_start = get_cytoband_coordinates(chrom, position)
+ cytoband_end = get_cytoband_coordinates(end_chrom, end)
+
+ coordinates = {
+ 'position': position,
+ 'end': end,
+ 'length': length,
+ 'sub_category': sub_category,
+ 'mate_id': mate_id,
+ 'cytoband_start': cytoband_start,
+ 'cytoband_end': cytoband_end,
+ 'end_chrom': end_chrom,
+ }
+
return coordinates
diff --git a/scout/parse/variant/variant.py b/scout/parse/variant/variant.py
index 625c8901d..de16616c1 100644
--- a/scout/parse/variant/variant.py
+++ b/scout/parse/variant/variant.py
@@ -81,20 +81,19 @@ def parse_variant(variant, case, variant_type='clinical',
category = 'snv'
parsed_variant['category'] = category
- #sub category is 'snv', 'indel', 'del', 'ins', 'dup', 'inv', 'cnv'
- # 'snv' and 'indel' are subcatogories of snv
- parsed_variant['sub_category'] = None
################# General information #################
parsed_variant['reference'] = variant.REF
- # We allways assume splitted and normalized vcfs
+
+ ### We allways assume splitted and normalized vcfs!!!
if len(variant.ALT) > 1:
raise VcfError("Variants are only allowed to have one alternative")
parsed_variant['alternative'] = variant.ALT[0]
# cyvcf2 will set QUAL to None if '.' in vcf
parsed_variant['quality'] = variant.QUAL
+
if variant.FILTER:
parsed_variant['filters'] = variant.FILTER.split(';')
else:
@@ -109,33 +108,14 @@ def parse_variant(variant, case, variant_type='clinical',
################# Position specific #################
parsed_variant['chromosome'] = chrom
- # position = start
- parsed_variant['position'] = int(variant.POS)
- svtype = variant.INFO.get('SVTYPE')
-
- svlen = variant.INFO.get('SVLEN')
-
- end = int(variant.end)
-
- mate_id = variant.INFO.get('MATEID')
-
- coordinates = parse_coordinates(
- chrom=parsed_variant['chromosome'],
- ref=parsed_variant['reference'],
- alt=parsed_variant['alternative'],
- position=parsed_variant['position'],
- category=parsed_variant['category'],
- svtype=svtype,
- svlen=svlen,
- end=end,
- mate_id=mate_id,
- )
+ coordinates = parse_coordinates(variant, category)
+ parsed_variant['position'] = coordinates['position']
parsed_variant['sub_category'] = coordinates['sub_category']
parsed_variant['mate_id'] = coordinates['mate_id']
- parsed_variant['end'] = int(coordinates['end'])
- parsed_variant['length'] = int(coordinates['length'])
+ parsed_variant['end'] = coordinates['end']
+ parsed_variant['length'] = coordinates['length']
parsed_variant['end_chrom'] = coordinates['end_chrom']
parsed_variant['cytoband_start'] = coordinates['cytoband_start']
parsed_variant['cytoband_end'] = coordinates['cytoband_end']
|
Clinical-Genomics/scout
|
e41d7b94106581fa28da793e2ab19c466e2f2f5a
|
diff --git a/tests/parse/test_parse_coordinates.py b/tests/parse/test_parse_coordinates.py
new file mode 100644
index 000000000..791148fb3
--- /dev/null
+++ b/tests/parse/test_parse_coordinates.py
@@ -0,0 +1,253 @@
+from scout.parse.variant.coordinates import (get_cytoband_coordinates, get_sub_category,
+ get_length, get_end, parse_coordinates)
+
+
+class CyvcfVariant(object):
+ """Mock a cyvcf variant
+
+ Default is to return a variant with three individuals high genotype
+ quality.
+ """
+ def __init__(self, chrom='1', pos=80000, ref='A', alt='C', end=None,
+ gt_quals=[60, 60, 60], gt_types=[1, 1, 0], var_type='snv',
+ info_dict={}):
+ super(CyvcfVariant, self).__init__()
+ self.CHROM = chrom
+ self.POS = pos
+ self.REF = ref
+ self.ALT = [alt]
+ self.end = end or pos
+ self.gt_quals = gt_quals
+ self.gt_types = gt_types
+ self.var_type = var_type
+ self.INFO = info_dict
+
+
+def test_parse_coordinates_snv():
+ variant = CyvcfVariant()
+
+ coordinates = parse_coordinates(variant, 'snv')
+
+ assert coordinates['position'] == variant.POS
+
+def test_parse_coordinates_indel():
+ variant = CyvcfVariant(alt='ACCC', end=80003)
+
+ coordinates = parse_coordinates(variant, 'snv')
+
+ assert coordinates['position'] == variant.POS
+ assert coordinates['end'] == variant.end
+
+def test_parse_coordinates_translocation():
+ info_dict = {
+ 'SVTYPE': 'BND',
+ }
+ variant = CyvcfVariant(
+ ref='N',
+ alt='N[hs37d5:12060532[',
+ pos=724779,
+ end=724779,
+ var_type='sv',
+ info_dict=info_dict,
+ )
+
+ coordinates = parse_coordinates(variant, 'sv')
+
+ assert coordinates['position'] == variant.POS
+ assert coordinates['end'] == 12060532
+ assert coordinates['end_chrom'] == 'hs37d5'
+ assert coordinates['length'] == 10e10
+ assert coordinates['sub_category'] == 'bnd'
+
+
+###### parse subcategory #######
+def test_get_subcategory_snv():
+ alt_len = 1
+ ref_len = 1
+ category = 'snv'
+ svtype = None
+
+ sub_category = get_sub_category(alt_len, ref_len, category, svtype)
+
+ assert sub_category == 'snv'
+
+def test_get_subcategory_indel():
+ alt_len = 1
+ ref_len = 3
+ category = 'snv'
+ svtype = None
+
+ sub_category = get_sub_category(alt_len, ref_len, category, svtype)
+
+ assert sub_category == 'indel'
+
+###### parse length #######
+
+# get_length(alt_len, ref_len, category, pos, end, svtype=None, svlen=None)
+def test_get_length_snv():
+ alt_len = 1
+ ref_len = 1
+ category = 'snv'
+ pos = end = 879537
+
+ length = get_length(alt_len, ref_len, category, pos, end)
+
+ assert length == 1
+
+def test_get_length_indel():
+ alt_len = 3
+ ref_len = 1
+ category = 'snv'
+ pos = end = 879537
+
+ length = get_length(alt_len, ref_len, category, pos, end)
+
+ assert length == 2
+
+def test_get_sv_length_small_ins():
+ ## GIVEN an insertion with whole sequence in alt field
+ alt_len = 296
+ ref_len = 1
+ category = 'sv'
+ # Pos and end is same for insertions
+ pos = end = 144343218
+ svtype = 'ins'
+ svlen = 296
+
+ ## WHEN parsing the length
+ length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
+
+ ## THEN assert that the length is correct
+ assert length == 296
+
+def test_get_sv_length_large_ins_no_length():
+ ## GIVEN an imprecise insertion
+ alt_len = 5
+ ref_len = 1
+ category = 'sv'
+ # Pos and end is same for insertions
+ pos = end = 133920667
+ svtype = 'ins'
+ svlen = None
+
+ ## WHEN parsing the length
+ length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
+
+ ## THEN assert that the length is correct
+ assert length == -1
+
+def test_get_sv_length_translocation():
+ ## GIVEN an translocation
+ alt_len = 16
+ ref_len = 1
+ category = 'sv'
+ pos = 726044
+ end = None
+ svtype = 'bnd'
+ svlen = None
+
+ ## WHEN parsing the length
+ length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
+
+ ## THEN assert that the length is correct
+ assert length == 10e10
+
+def test_get_sv_length_cnvnator_del():
+ ## GIVEN an cnvnator type deletion
+ alt_len = 5
+ ref_len = 1
+ category = 'sv'
+ pos = 1
+ end = 10000
+ svtype = 'del'
+ svlen = -10000
+
+ ## WHEN parsing the length
+ length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
+
+ ## THEN assert that the length is correct
+ assert length == 10000
+
+def test_get_sv_length_del_no_length():
+ ## GIVEN an deletion without len
+ alt_len = 5
+ ref_len = 1
+ category = 'sv'
+ pos = 869314
+ end = 870246
+ svtype = 'del'
+ svlen = None
+
+ ## WHEN parsing the length
+ length = get_length(alt_len, ref_len, category, pos, end, svtype, svlen)
+
+ ## THEN assert that the length is correct
+ assert length == end - pos
+
+###### parse end #######
+# get_end(pos, alt, category, snvend, svend, svlen)
+
+# snv/indels are easy since cyvcf2 are parsing the end for us
+
+def test_get_end_snv():
+ alt = 'T'
+ category = 'snv'
+ pos = snvend = 879537
+
+ end = get_end(pos, alt, category, snvend, svend=None, svlen=None)
+
+ assert end == snvend
+
+def test_get_end_indel():
+ alt = 'C'
+ category = 'indel'
+ pos = 302253
+ snvend = 302265
+
+ end = get_end(pos, alt, category, snvend, svend=None, svlen=None)
+
+ assert end == snvend
+
+# SVs are much harder since there are a lot of corner cases
+# Most SVs (except translocations) have END annotated in INFO field
+# The problem is that many times END==POS and then we have to do some magic on our own
+
+def test_get_end_tiddit_translocation():
+ ## GIVEN a translocation
+ alt = 'N[hs37d5:12060532['
+ category = 'sv'
+ pos = 724779
+
+ ## WHEN parsing the end coordinate
+ end = get_end(pos, alt, category, snvend=None, svend=None, svlen=None)
+
+ ## THEN assert that the end is the same as en coordinate described in alt field
+ assert end == 12060532
+
+def test_get_end_tiddit_translocation():
+ ## GIVEN a translocation
+ alt = 'N[hs37d5:12060532['
+ category = 'sv'
+ pos = 724779
+
+ ## WHEN parsing the end coordinate
+ end = get_end(pos, alt, category, snvend=None, svend=None, svlen=None)
+
+ ## THEN assert that the end is the same as en coordinate described in alt field
+ assert end == 12060532
+
+def test_get_end_deletion():
+ ## GIVEN a translocation
+ alt = '<DEL>'
+ category = 'sv'
+ pos = 869314
+ svend = 870246
+ svlen = None
+
+ ## WHEN parsing the end coordinate
+ end = get_end(pos, alt, category, snvend=None, svend=svend, svlen=svlen)
+
+ ## THEN assert that the end is the same as en coordinate described in alt field
+ assert end == svend
+
+
|
CSV - length -1 for a deletion of 80 genes
https://scout.scilifelab.se/cust003/17159/sv/variants?variant_type=clinical&gene_panels=EP&hgnc_symbols=&size=&chrom=&thousand_genomes_frequency=
Please have a look; something doesn't fit here.
A deletion of 1 bp cannot contain 80 genes.
thanks,
Michela
|
0.0
|
e41d7b94106581fa28da793e2ab19c466e2f2f5a
|
[
"tests/parse/test_parse_coordinates.py::test_get_end_deletion",
"tests/parse/test_parse_coordinates.py::test_parse_coordinates_snv",
"tests/parse/test_parse_coordinates.py::test_get_sv_length_translocation",
"tests/parse/test_parse_coordinates.py::test_get_sv_length_del_no_length",
"tests/parse/test_parse_coordinates.py::test_get_sv_length_large_ins_no_length",
"tests/parse/test_parse_coordinates.py::test_get_sv_length_cnvnator_del",
"tests/parse/test_parse_coordinates.py::test_get_end_indel",
"tests/parse/test_parse_coordinates.py::test_parse_coordinates_translocation",
"tests/parse/test_parse_coordinates.py::test_get_end_tiddit_translocation",
"tests/parse/test_parse_coordinates.py::test_get_sv_length_small_ins",
"tests/parse/test_parse_coordinates.py::test_get_end_snv",
"tests/parse/test_parse_coordinates.py::test_parse_coordinates_indel"
] |
[
"tests/parse/test_parse_coordinates.py::test_get_subcategory_indel",
"tests/parse/test_parse_coordinates.py::test_get_length_snv",
"tests/parse/test_parse_coordinates.py::test_get_length_indel",
"tests/parse/test_parse_coordinates.py::test_get_subcategory_snv"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2017-11-10 12:04:32+00:00
|
bsd-3-clause
| 143
|
|
learningequality__ricecooker-394
|
diff --git a/Makefile b/Makefile
index aabbbbd..4a0a854 100644
--- a/Makefile
+++ b/Makefile
@@ -59,6 +59,14 @@ test: clean-test ## run tests quickly with the default Python
test-all: clean-test ## run tests on every Python version with tox
tox
+integration-test:
+ echo "Testing against hotfixes"
+ CONTENTWORKSHOP_URL=https://hotfixes.studio.learningequality.org python tests/test_chef_integration.py
+ echo "Testing against unstable"
+ CONTENTWORKSHOP_URL=https://unstable.studio.learningequality.org python tests/test_chef_integration.py
+ echo "Testing against production"
+ CONTENTWORKSHOP_URL=https://studio.learningequality.org python tests/test_chef_integration.py
+
coverage: ## check code coverage quickly with the default Python
pip install coverage pytest
coverage run --source ricecooker -m pytest
diff --git a/ricecooker/classes/nodes.py b/ricecooker/classes/nodes.py
index 3f6e794..2f28609 100644
--- a/ricecooker/classes/nodes.py
+++ b/ricecooker/classes/nodes.py
@@ -463,6 +463,12 @@ class TreeNode(Node):
provider="",
tags=None,
domain_ns=None,
+ grade_levels=None,
+ resource_types=None,
+ learning_activities=None,
+ accessibility_labels=None,
+ categories=None,
+ learner_needs=None,
**kwargs
):
# Map parameters to model variables
@@ -478,6 +484,13 @@ class TreeNode(Node):
self.questions if hasattr(self, "questions") else []
) # Needed for to_dict method
+ self.grade_levels = grade_levels or []
+ self.resource_types = resource_types or []
+ self.learning_activities = learning_activities or []
+ self.accessibility_labels = accessibility_labels or []
+ self.categories = categories or []
+ self.learner_needs = learner_needs or []
+
super(TreeNode, self).__init__(title, **kwargs)
def get_domain_namespace(self):
@@ -569,12 +582,12 @@ class TreeNode(Node):
"copyright_holder": "",
"questions": [],
"extra_fields": json.dumps(self.extra_fields),
- "grade_levels": None,
- "resource_types": None,
- "learning_activities": None,
- "accessibility_categories": None,
- "subjects": None,
- "needs": None,
+ "grade_levels": self.grade_levels,
+ "resource_types": self.resource_types,
+ "learning_activities": self.learning_activities,
+ "accessibility_labels": self.accessibility_labels,
+ "categories": self.categories,
+ "learner_needs": self.learner_needs,
}
def validate(self):
@@ -686,12 +699,6 @@ class ContentNode(TreeNode):
**kwargs
):
self.role = role
- self.grade_levels = grade_levels
- self.resource_types = resource_types
- self.learning_activities = learning_activities
- self.accessibility_labels = accessibility_labels
- self.categories = categories
- self.learner_needs = learner_needs
self.set_license(
license, copyright_holder=copyright_holder, description=license_description
@@ -826,9 +833,9 @@ class ContentNode(TreeNode):
"grade_levels": self.grade_levels,
"resource_types": self.resource_types,
"learning_activities": self.learning_activities,
- "accessibility_categories": self.accessibility_labels,
- "subjects": self.categories,
- "needs": self.learner_needs,
+ "accessibility_labels": self.accessibility_labels,
+ "categories": self.categories,
+ "learner_needs": self.learner_needs,
}
|
learningequality/ricecooker
|
ecea76069def01bae2aff9a3656d5715d85144e2
|
diff --git a/tests/conftest.py b/tests/conftest.py
index aa15b77..a0c5f30 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -171,6 +171,12 @@ def base_data(channel_domain_namespace, title):
"license_description": None,
"aggregator": "", # New in ricecooker 0.6.20
"provider": "", # New in ricecooker 0.6.20
+ "grade_levels": [],
+ "resource_types": [],
+ "learning_activities": [],
+ "accessibility_labels": [],
+ "categories": [],
+ "learner_needs": [],
}
@@ -259,6 +265,12 @@ def contentnode_base_data(base_data):
"copyright_holder": "Copyright Holder",
"license_description": None,
"role": roles.LEARNER,
+ "grade_levels": [],
+ "resource_types": [],
+ "learning_activities": [],
+ "accessibility_labels": [],
+ "categories": [],
+ "learner_needs": [],
}
)
return data
diff --git a/tests/test_chef_integration.py b/tests/test_chef_integration.py
new file mode 100644
index 0000000..1d4d9ad
--- /dev/null
+++ b/tests/test_chef_integration.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python
+import random
+import string
+
+from le_utils.constants import licenses
+
+from ricecooker.chefs import SushiChef
+from ricecooker.classes.files import AudioFile
+from ricecooker.classes.files import DocumentFile
+from ricecooker.classes.files import VideoFile
+from ricecooker.classes.licenses import get_license
+from ricecooker.classes.nodes import AudioNode
+from ricecooker.classes.nodes import DocumentNode
+from ricecooker.classes.nodes import TopicNode
+from ricecooker.classes.nodes import VideoNode
+
+
+class TestChef(SushiChef):
+ """
+ Used as an integration test by actually using Ricecooker to chef local test content into Studio.
+
+ For anything you need to test, add it to the channel created in the `construct_channel`.
+
+ Copied from examples/tutorial/sushichef.py
+ """
+
+ # Be sure we don't conflict with a channel someone else pushed before us when running this test
+ # as the channel source domain and ID determine which Channel is updated on Studio and since
+ # you'll run this with your own API key we can use this random (enough) string generator (thanks SO)
+ # to append a random set of characters to the two values.
+ def randomstring():
+ return "".join(
+ random.choice(string.ascii_uppercase + string.digits) for _ in range(8)
+ )
+
+ channel_info = {
+ "CHANNEL_SOURCE_DOMAIN": "RicecookerIntegrationTest.{}".format(
+ randomstring()
+ ), # who is providing the content (e.g. learningequality.org)
+ "CHANNEL_SOURCE_ID": "RicecookerTests.{}".format(
+ randomstring()
+ ), # channel's unique id
+ "CHANNEL_TITLE": "Ricecooker Testing!",
+ "CHANNEL_LANGUAGE": "en",
+ }
+
+ # CONSTRUCT CHANNEL
+ def construct_channel(self, *args, **kwargs):
+ """
+ This method is reponsible for creating a `ChannelNode` object and
+ populating it with `TopicNode` and `ContentNode` children.
+ """
+ # Create channel
+ ########################################################################
+ channel = self.get_channel(*args, **kwargs) # uses self.channel_info
+
+ # Create topics to add to your channel
+ ########################################################################
+ # Here we are creating a topic named 'Example Topic'
+ exampletopic = TopicNode(source_id="topic-1", title="Example Topic")
+
+ # Now we are adding 'Example Topic' to our channel
+ channel.add_child(exampletopic)
+
+ # You can also add subtopics to topics
+ # Here we are creating a subtopic named 'Example Subtopic'
+ examplesubtopic = TopicNode(source_id="topic-1a", title="Example Subtopic")
+
+ # Now we are adding 'Example Subtopic' to our 'Example Topic'
+ exampletopic.add_child(examplesubtopic)
+
+ # Content
+ # You can add documents (pdfs and ePubs), videos, audios, and other content
+ # let's create a document file called 'Example PDF'
+ document_file = DocumentFile(path="http://www.pdf995.com/samples/pdf.pdf")
+ examplepdf = DocumentNode(
+ title="Example PDF",
+ source_id="example-pdf",
+ files=[document_file],
+ license=get_license(licenses.PUBLIC_DOMAIN),
+ )
+
+ # We are also going to add a video file called 'Example Video'
+ video_file = VideoFile(
+ path="https://ia600209.us.archive.org/27/items/RiceChef/Rice Chef.mp4"
+ )
+ fancy_license = get_license(
+ licenses.SPECIAL_PERMISSIONS,
+ description="Special license for ricecooker fans only.",
+ copyright_holder="The chef video makers",
+ )
+ examplevideo = VideoNode(
+ title="Example Video",
+ source_id="example-video",
+ files=[video_file],
+ license=fancy_license,
+ )
+
+ # Finally, we are creating an audio file called 'Example Audio'
+ audio_file = AudioFile(
+ path="https://ia802508.us.archive.org/5/items/testmp3testfile/mpthreetest.mp3"
+ )
+ exampleaudio = AudioNode(
+ title="Example Audio",
+ source_id="example-audio",
+ files=[audio_file],
+ license=get_license(licenses.PUBLIC_DOMAIN),
+ )
+
+ # Now that we have our files, let's add them to our channel
+ channel.add_child(examplepdf) # Adding 'Example PDF' to your channel
+ exampletopic.add_child(
+ examplevideo
+ ) # Adding 'Example Video' to 'Example Topic'
+ examplesubtopic.add_child(
+ exampleaudio
+ ) # Adding 'Example Audio' to 'Example Subtopic'
+
+ # the `construct_channel` method returns a ChannelNode that will be
+ # processed by the ricecooker framework
+ return channel
+
+
+if __name__ == "__main__":
+ """
+ This code will run when the sushi chef is called from the command line.
+ """
+ chef = TestChef()
+ print(
+ "Note that you will need your Studio API key for this. It will upload to your account."
+ )
+ chef.main()
diff --git a/tests/test_data.py b/tests/test_data.py
index d50d03b..4816941 100644
--- a/tests/test_data.py
+++ b/tests/test_data.py
@@ -145,10 +145,26 @@ def test_video_to_dict(video, video_data):
video_dict.pop("files")
expected_files = video_data.pop("files")
video_data["extra_fields"] = json.dumps(video_data["extra_fields"])
+
assert video.files == expected_files, "Video files do not match"
+
for key, _ in video_data.items():
assert key in video_dict, "Key {} is not found in to_dict method".format(key)
+
+ list_type_keys = [
+ "grade_levels",
+ "learner_needs",
+ "accessibility_labels",
+ "categories",
+ "learning_activities",
+ "resource_types",
+ ]
for key, value in video_dict.items():
+ if key in list_type_keys:
+ assert isinstance(value, list), "{} should be a list, but it's {}".format(
+ key, value
+ )
+
assert value == video_data.get(key), "Mismatched {}: {} != {}".format(
key, value, video_data[key]
)
|
TreeNode to_dict is incompatible with Studio unstable branch
* ricecooker version: v0.7.0-beta6
* Python version: 3.7
* Operating System: Fedora Silverblue 36
### Description
I am creating a ricecooker script which makes use of the new structured metadata fields in the `develop` branch. This script creates a tree of TopicNode and VideoNode objects.
This is all working almost correctly.
The video nodes end up with attributes like `node.grade_levels = [levels.LOWER_PRIMARY]`, and the [`ContentNode.to_dict()`](https://github.com/learningequality/ricecooker/blob/aa857d102fc19a066931a8fe97e4d869e6d0d606/ricecooker/classes/nodes.py#L797-L832) function for each video node produces the expected schema, with something like `{…, "grade_levels": "wnarlxKo", …}`.
The topic nodes do not have the structured metadata attributes. However, the [`TreeNode.to_dict()`](https://github.com/learningequality/ricecooker/blob/aa857d102fc19a066931a8fe97e4d869e6d0d606/ricecooker/classes/nodes.py#L545-L578) function produces output like `{…, "grade_levels": null, …}`.
This is incompatible with the code in Studio which validates attributes for nodes: https://github.com/learningequality/studio/blob/269e7e0b677c569c3a68c0a30a0d0fa342f190c0/contentcuration/contentcuration/views/internal.py#L667-L675. If a structured metadata attribute is included in the body, it _must_ be a list.
The result is a failed upload, with an error like the following:
```
(319)add_nodes()
318 ]
--> 319 for chunk in chunks:
320 payload_children = []
ipdb> response.content
b"['grade_levels must pass a list of values']"
ipdb> response.request.body
'{"root_id": "db6de12407f24f1c9e36752abd49ef2f", "content_data": [{"title": "Basketball", "language": "en", "description": "", "node_id": "8ed9e0083dc45373a385d2ab2d001132", "content_id": "ce34868e07ec5404a35de380ccd502dd", "source_domain": "379151f326fd5a0080473baf3c4c396e", "source_id": "basketball", "author": "", "aggregator": "", "provider": "", "files": [], "tags": [], "kind": "topic", "license": null, "license_description": null, "copyright_holder": "", "questions": [], "extra_fields": "{}", "grade_levels": null, "resource_types": null, "learning_activities": null, "accessibility_categories": null, "subjects": null, "needs": null}, {"title": "Soccer", "language": "en", "description": "", "node_id": "357293c10e8b59e5b383da07d62c6c90", "content_id": "030cbf9bbc8353bbbbb91d641b6318e9", "source_domain": "379151f326fd5a0080473baf3c4c396e", "source_id": "soccer", "author": "", "aggregator": "", "provider": "", "files": [], "tags": [], "kind": "topic", "license": null, "license_description": null, "copyright_holder": "", "questions": [], "extra_fields": "{}", "grade_levels": null, "resource_types": null, "learning_activities": null, "accessibility_categories": null, "subjects": null, "needs": null}, {"title": "Juggling", "language": "en", "description": "", "node_id": "a5f005393b7b5841a6b067a99779d7b5", "content_id": "ef04f09ef399570aba145495ddcce80a", "source_domain": "379151f326fd5a0080473baf3c4c396e", "source_id": "juggling", "author": "", "aggregator": "", "provider": "", "files": [], "tags": [], "kind": "topic", "license": null, "license_description": null, "copyright_holder": "", "questions": [], "extra_fields": "{}", "grade_levels": null, "resource_types": null, "learning_activities": null, "accessibility_categories": null, "subjects": null, "needs": null}]}'
```
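One possible workaround, sketched below purely for illustration (the helper name and key list are assumptions, not part of the ricecooker API), is to drop any structured metadata field that serializes to `None` from the node payload before it is posted, so Studio's list validation is never triggered:

```python
# Hypothetical helper, not part of ricecooker: strip structured metadata keys
# that are None so Studio's "must pass a list of values" check is not hit.
STRUCTURED_METADATA_KEYS = [
    "grade_levels",
    "resource_types",
    "learning_activities",
    "accessibility_categories",
    "subjects",
    "needs",
]

def strip_null_metadata(node_dict):
    """Return a copy of a serialized node without None-valued metadata fields."""
    return {
        key: value
        for key, value in node_dict.items()
        if not (key in STRUCTURED_METADATA_KEYS and value is None)
    }
```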
|
0.0
|
ecea76069def01bae2aff9a3656d5715d85144e2
|
[
"tests/test_data.py::test_init",
"tests/test_data.py::test_validate",
"tests/test_data.py::test_topic_to_dict",
"tests/test_data.py::test_video_to_dict",
"tests/test_data.py::test_audio_to_dict",
"tests/test_data.py::test_document_to_dict",
"tests/test_data.py::test_html_to_dict",
"tests/test_data.py::test_exercise_to_dict",
"tests/test_data.py::test_slideshow_to_dict"
] |
[
"tests/test_data.py::test_alternative_domain_namespace",
"tests/test_data.py::test_channel_to_dict"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2022-09-22 19:13:03+00:00
|
mit
| 3,530
|
|
xarray-contrib__xskillscore-339
|
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 4a955fd..40f2270 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -5,6 +5,11 @@ Changelog History
xskillscore v0.0.23 (2021-xx-xx)
--------------------------------
+Internal Changes
+~~~~~~~~~~~~~~~~
+- :py:func:`~xskillscore.resampling.resample_iterations_idx` do not break when ``dim`` is
+ not coordinate. (:issue:`303`, :pr:`339`) `Aaron Spring`_
+
xskillscore v0.0.22 (2021-06-29)
--------------------------------
diff --git a/xskillscore/core/resampling.py b/xskillscore/core/resampling.py
index e4f1096..5481029 100644
--- a/xskillscore/core/resampling.py
+++ b/xskillscore/core/resampling.py
@@ -107,6 +107,8 @@ def resample_iterations(forecast, iterations, dim="member", dim_max=None, replac
forecast_smp.append(forecast.isel({dim: idx}).assign_coords({dim: new_dim}))
forecast_smp = xr.concat(forecast_smp, dim="iteration", **CONCAT_KWARGS)
forecast_smp["iteration"] = np.arange(iterations)
+ if dim not in forecast.coords:
+ del forecast_smp.coords[dim]
return forecast_smp.transpose(..., "iteration")
@@ -172,7 +174,12 @@ def resample_iterations_idx(
for interannual-to-decadal predictions experiments. Climate Dynamics, 40(1–2),
245–272. https://doi.org/10/f4jjvf
"""
- # equivalent to above
+ if dim not in forecast.coords:
+ forecast.coords[dim] = np.arange(0, forecast[dim].size)
+ dim_coord_set = True
+ else:
+ dim_coord_set = False
+
select_dim_items = forecast[dim].size
new_dim = forecast[dim]
@@ -205,4 +212,6 @@ def resample_iterations_idx(
# return only dim_max members
if dim_max is not None and dim_max <= forecast[dim].size:
forecast_smp = forecast_smp.isel({dim: slice(None, dim_max)})
+ if dim_coord_set:
+ del forecast_smp.coords[dim]
return forecast_smp
|
xarray-contrib/xskillscore
|
ef0c0fd34add126eb88a0334b3da348b9eef971b
|
diff --git a/xskillscore/tests/test_resampling.py b/xskillscore/tests/test_resampling.py
index 6d09fe3..572a55c 100644
--- a/xskillscore/tests/test_resampling.py
+++ b/xskillscore/tests/test_resampling.py
@@ -154,3 +154,14 @@ def test_resample_inputs(a_1d, func, input, chunk, replace):
assert is_dask_collection(actual) if chunk else not is_dask_collection(actual)
# input type preserved
assert type(actual) == type(a_1d)
+
+
[email protected]("func", resample_iterations_funcs)
+def test_resample_dim_no_coord(func):
+ """resample_iterations doesnt fail when no dim coords"""
+ da = xr.DataArray(
+ np.random.rand(100, 3, 3),
+ coords=[("time", np.arange(100)), ("x", np.arange(3)), ("y", np.arange(3))],
+ )
+ del da.coords["time"]
+ assert "time" not in func(da, 2, dim="time").coords
|
raise error if forecast doesn't contain coords in resample_iterations_idx
`a = xr.DataArray(np.random.rand(1000, 3, 3), dims=['time', 'x', 'y'])`
doesn't work in
`xs.resample_iterations_idx(a, 500, 'time')`
```
xr.DataArray(
np.random.rand(1000, 3, 3),
coords=[("time", np.arange(1000)), ("x", np.arange(3)), ("y", np.arange(3))],
)
```
does.
Taken from https://github.com/xarray-contrib/xskillscore/pull/302#issuecomment-832863346
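As a stopgap on releases that do not include the fix above, one can attach an index coordinate to the resampled dimension before calling the function and drop it again afterwards; the snippet below is illustrative only and mirrors what the patch does internally:

```python
import numpy as np
import xarray as xr
import xskillscore as xs

# DataArray without a "time" coordinate, as in the report above.
a = xr.DataArray(np.random.rand(1000, 3, 3), dims=["time", "x", "y"])

# Give "time" an explicit index coordinate, resample, then remove it again
# so the result matches the coordinate-less input.
a = a.assign_coords(time=np.arange(a.sizes["time"]))
resampled = xs.resample_iterations_idx(a, 500, "time")
resampled = resampled.drop_vars("time")
```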
|
0.0
|
ef0c0fd34add126eb88a0334b3da348b9eef971b
|
[
"xskillscore/tests/test_resampling.py::test_resample_dim_no_coord[resample_iterations]",
"xskillscore/tests/test_resampling.py::test_resample_dim_no_coord[resample_iterations_idx]"
] |
[
"xskillscore/tests/test_resampling.py::test_resampling_roughly_identical_mean",
"xskillscore/tests/test_resampling.py::test_gen_idx_replace[True]",
"xskillscore/tests/test_resampling.py::test_gen_idx_replace[False]",
"xskillscore/tests/test_resampling.py::test_resample_replace_False_once_same_mean[resample_iterations]",
"xskillscore/tests/test_resampling.py::test_resample_replace_False_once_same_mean[resample_iterations_idx]",
"xskillscore/tests/test_resampling.py::test_resample_dim_max[None-resample_iterations]",
"xskillscore/tests/test_resampling.py::test_resample_dim_max[None-resample_iterations_idx]",
"xskillscore/tests/test_resampling.py::test_resample_dim_max[5-resample_iterations]",
"xskillscore/tests/test_resampling.py::test_resample_dim_max[5-resample_iterations_idx]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-True-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-True-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-False-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-Dataset-False-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-multidim",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-True-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-True-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-False-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations-DataArray-False-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-True-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-True-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-False-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-Dataset-False-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-multidim",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-True-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-True-False]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-False-True]",
"xskillscore/tests/test_resampling.py::test_resample_inputs[resample_iterations_idx-DataArray-False-False]"
] |
{
"failed_lite_validators": [
"has_short_problem_statement",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2021-07-24 22:26:25+00:00
|
apache-2.0
| 6,279
|
|
ej2__python-quickbooks-322
|
diff --git a/quickbooks/mixins.py b/quickbooks/mixins.py
index 9b51a42..2083139 100644
--- a/quickbooks/mixins.py
+++ b/quickbooks/mixins.py
@@ -119,6 +119,53 @@ class SendMixin(object):
class VoidMixin(object):
+
+ def get_void_params(self):
+ qb_object_params_map = {
+ "Payment": {
+ "operation": "update",
+ "include": "void"
+ },
+ "SalesReceipt": {
+ "operation": "update",
+ "include": "void"
+ },
+ "BillPayment": {
+ "operation": "update",
+ "include": "void"
+ },
+ "Invoice": {
+ "operation": "void",
+ },
+ }
+ # setting the default operation to void (the original behavior)
+ return qb_object_params_map.get(self.qbo_object_name, {"operation": "void"})
+
+ def get_void_data(self):
+ qb_object_params_map = {
+ "Payment": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ "sparse": True
+ },
+ "SalesReceipt": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ "sparse": True
+ },
+ "BillPayment": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ "sparse": True
+ },
+ "Invoice": {
+ "Id": self.Id,
+ "SyncToken": self.SyncToken,
+ },
+ }
+ # setting the default operation to void (the original behavior)
+ return qb_object_params_map.get(self.qbo_object_name, {"operation": "void"})
+
def void(self, qb=None):
if not qb:
qb = QuickBooks()
@@ -126,14 +173,12 @@ class VoidMixin(object):
if not self.Id:
raise QuickbooksException('Cannot void unsaved object')
- data = {
- 'Id': self.Id,
- 'SyncToken': self.SyncToken,
- }
-
endpoint = self.qbo_object_name.lower()
url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
- results = qb.post(url, json.dumps(data), params={'operation': 'void'})
+
+ data = self.get_void_data()
+ params = self.get_void_params()
+ results = qb.post(url, json.dumps(data), params=params)
return results
diff --git a/quickbooks/objects/attachable.py b/quickbooks/objects/attachable.py
index e7d23d0..23e7132 100644
--- a/quickbooks/objects/attachable.py
+++ b/quickbooks/objects/attachable.py
@@ -58,7 +58,7 @@ class Attachable(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEnti
else:
json_data = qb.create_object(self.qbo_object_name, self.to_json(), _file_path=self._FilePath)
- if self.FileName:
+ if self.Id is None and self.FileName:
obj = type(self).from_json(json_data['AttachableResponse'][0]['Attachable'])
else:
obj = type(self).from_json(json_data['Attachable'])
diff --git a/quickbooks/objects/billpayment.py b/quickbooks/objects/billpayment.py
index 3f175d9..64569d9 100644
--- a/quickbooks/objects/billpayment.py
+++ b/quickbooks/objects/billpayment.py
@@ -1,6 +1,6 @@
from .base import QuickbooksBaseObject, Ref, LinkedTxn, QuickbooksManagedObject, LinkedTxnMixin, \
QuickbooksTransactionEntity
-from ..mixins import DeleteMixin
+from ..mixins import DeleteMixin, VoidMixin
class CheckPayment(QuickbooksBaseObject):
@@ -47,7 +47,7 @@ class BillPaymentLine(QuickbooksBaseObject):
return str(self.Amount)
-class BillPayment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin):
+class BillPayment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin, VoidMixin):
"""
QBO definition: A BillPayment entity represents the financial transaction of payment
of bills that the business owner receives from a vendor for goods or services purchased
diff --git a/quickbooks/objects/payment.py b/quickbooks/objects/payment.py
index 07e1522..75cbf57 100644
--- a/quickbooks/objects/payment.py
+++ b/quickbooks/objects/payment.py
@@ -3,7 +3,7 @@ from .base import QuickbooksBaseObject, Ref, LinkedTxn, \
LinkedTxnMixin, MetaData
from ..client import QuickBooks
from .creditcardpayment import CreditCardPayment
-from ..mixins import DeleteMixin
+from ..mixins import DeleteMixin, VoidMixin
import json
@@ -21,7 +21,7 @@ class PaymentLine(QuickbooksBaseObject):
return str(self.Amount)
-class Payment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin):
+class Payment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity, LinkedTxnMixin, VoidMixin):
"""
QBO definition: A Payment entity records a payment in QuickBooks. The payment can be
applied for a particular customer against multiple Invoices and Credit Memos. It can also
@@ -81,24 +81,5 @@ class Payment(DeleteMixin, QuickbooksManagedObject, QuickbooksTransactionEntity,
# These fields are for minor version 4
self.TransactionLocationType = None
- def void(self, qb=None):
- if not qb:
- qb = QuickBooks()
-
- if not self.Id:
- raise qb.QuickbooksException('Cannot void unsaved object')
-
- data = {
- 'Id': self.Id,
- 'SyncToken': self.SyncToken,
- 'sparse': True
- }
-
- endpoint = self.qbo_object_name.lower()
- url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
- results = qb.post(url, json.dumps(data), params={'operation': 'update', 'include': 'void'})
-
- return results
-
def __str__(self):
return str(self.TotalAmt)
diff --git a/quickbooks/objects/salesreceipt.py b/quickbooks/objects/salesreceipt.py
index 83b55a2..0a42925 100644
--- a/quickbooks/objects/salesreceipt.py
+++ b/quickbooks/objects/salesreceipt.py
@@ -2,11 +2,11 @@ from .base import Ref, CustomField, QuickbooksManagedObject, LinkedTxnMixin, Add
EmailAddress, QuickbooksTransactionEntity, LinkedTxn
from .tax import TxnTaxDetail
from .detailline import DetailLine
-from ..mixins import QuickbooksPdfDownloadable, DeleteMixin
+from ..mixins import QuickbooksPdfDownloadable, DeleteMixin, VoidMixin
class SalesReceipt(DeleteMixin, QuickbooksPdfDownloadable, QuickbooksManagedObject,
- QuickbooksTransactionEntity, LinkedTxnMixin):
+ QuickbooksTransactionEntity, LinkedTxnMixin, VoidMixin):
"""
QBO definition: SalesReceipt represents the sales receipt that is given to a customer.
A sales receipt is similar to an invoice. However, for a sales receipt, payment is received
|
ej2/python-quickbooks
|
a02ca1ba6bac7ced8e1af07e6a04d7a46818df22
|
diff --git a/tests/integration/test_billpayment.py b/tests/integration/test_billpayment.py
index fab990c..c7ce650 100644
--- a/tests/integration/test_billpayment.py
+++ b/tests/integration/test_billpayment.py
@@ -1,5 +1,6 @@
from datetime import datetime
+from quickbooks.objects import AccountBasedExpenseLine, Ref, AccountBasedExpenseLineDetail
from quickbooks.objects.account import Account
from quickbooks.objects.bill import Bill
from quickbooks.objects.billpayment import BillPayment, BillPaymentLine, CheckPayment
@@ -14,12 +15,30 @@ class BillPaymentTest(QuickbooksTestCase):
self.account_number = datetime.now().strftime('%d%H%M')
self.name = "Test Account {0}".format(self.account_number)
- def test_create(self):
+ def create_bill(self, amount):
+ bill = Bill()
+ line = AccountBasedExpenseLine()
+ line.Amount = amount
+ line.DetailType = "AccountBasedExpenseLineDetail"
+
+ account_ref = Ref()
+ account_ref.type = "Account"
+ account_ref.value = 1
+ line.AccountBasedExpenseLineDetail = AccountBasedExpenseLineDetail()
+ line.AccountBasedExpenseLineDetail.AccountRef = account_ref
+ bill.Line.append(line)
+
+ vendor = Vendor.all(max_results=1, qb=self.qb_client)[0]
+ bill.VendorRef = vendor.to_ref()
+
+ return bill.save(qb=self.qb_client)
+
+ def create_bill_payment(self, bill, amount, private_note, pay_type):
bill_payment = BillPayment()
- bill_payment.PayType = "Check"
- bill_payment.TotalAmt = 200
- bill_payment.PrivateNote = "Private Note"
+ bill_payment.PayType = pay_type
+ bill_payment.TotalAmt = amount
+ bill_payment.PrivateNote = private_note
vendor = Vendor.all(max_results=1, qb=self.qb_client)[0]
bill_payment.VendorRef = vendor.to_ref()
@@ -31,14 +50,18 @@ class BillPaymentTest(QuickbooksTestCase):
ap_account = Account.where("AccountSubType = 'AccountsPayable'", qb=self.qb_client)[0]
bill_payment.APAccountRef = ap_account.to_ref()
- bill = Bill.all(max_results=1, qb=self.qb_client)[0]
-
line = BillPaymentLine()
line.LinkedTxn.append(bill.to_linked_txn())
line.Amount = 200
bill_payment.Line.append(line)
- bill_payment.save(qb=self.qb_client)
+ return bill_payment.save(qb=self.qb_client)
+
+ def test_create(self):
+ # create new bill for testing, reusing the same bill will cause Line to be empty
+ # and the new bill payment will be voided automatically
+ bill = self.create_bill(amount=200)
+ bill_payment = self.create_bill_payment(bill, 200, "Private Note", "Check")
query_bill_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
@@ -48,3 +71,16 @@ class BillPaymentTest(QuickbooksTestCase):
self.assertEqual(len(query_bill_payment.Line), 1)
self.assertEqual(query_bill_payment.Line[0].Amount, 200.0)
+
+ def test_void(self):
+ bill = self.create_bill(amount=200)
+ bill_payment = self.create_bill_payment(bill, 200, "Private Note", "Check")
+ query_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
+ self.assertEqual(query_payment.TotalAmt, 200.0)
+ self.assertNotIn('Voided', query_payment.PrivateNote)
+
+ bill_payment.void(qb=self.qb_client)
+ query_payment = BillPayment.get(bill_payment.Id, qb=self.qb_client)
+
+ self.assertEqual(query_payment.TotalAmt, 0.0)
+ self.assertIn('Voided', query_payment.PrivateNote)
\ No newline at end of file
diff --git a/tests/integration/test_invoice.py b/tests/integration/test_invoice.py
index 4a686a5..c02e40e 100644
--- a/tests/integration/test_invoice.py
+++ b/tests/integration/test_invoice.py
@@ -75,3 +75,14 @@ class InvoiceTest(QuickbooksTestCase):
query_invoice = Invoice.filter(Id=invoice_id, qb=self.qb_client)
self.assertEqual([], query_invoice)
+
+ def test_void(self):
+ customer = Customer.all(max_results=1, qb=self.qb_client)[0]
+ invoice = self.create_invoice(customer)
+ invoice_id = invoice.Id
+ invoice.void(qb=self.qb_client)
+
+ query_invoice = Invoice.get(invoice_id, qb=self.qb_client)
+ self.assertEqual(query_invoice.Balance, 0.0)
+ self.assertEqual(query_invoice.TotalAmt, 0.0)
+ self.assertIn('Voided', query_invoice.PrivateNote)
diff --git a/tests/integration/test_salesreceipt.py b/tests/integration/test_salesreceipt.py
new file mode 100644
index 0000000..ce3bd2a
--- /dev/null
+++ b/tests/integration/test_salesreceipt.py
@@ -0,0 +1,59 @@
+from datetime import datetime
+
+from quickbooks.objects import SalesReceipt, Customer, \
+ SalesItemLine, SalesItemLineDetail, Item
+from tests.integration.test_base import QuickbooksTestCase
+
+
+class SalesReceiptTest(QuickbooksTestCase):
+ def setUp(self):
+ super(SalesReceiptTest, self).setUp()
+
+ self.account_number = datetime.now().strftime('%d%H%M')
+ self.name = "Test Account {0}".format(self.account_number)
+
+ def create_sales_receipt(self, qty=1, unit_price=100.0):
+ sales_receipt = SalesReceipt()
+ sales_receipt.TotalAmt = qty * unit_price
+ customer = Customer.all(max_results=1, qb=self.qb_client)[0]
+ sales_receipt.CustomerRef = customer.to_ref()
+ item = Item.all(max_results=1, qb=self.qb_client)[0]
+ line = SalesItemLine()
+ sales_item_line_detail = SalesItemLineDetail()
+ sales_item_line_detail.ItemRef = item.to_ref()
+ sales_item_line_detail.Qty = qty
+ sales_item_line_detail.UnitPrice = unit_price
+ today = datetime.now()
+ sales_item_line_detail.ServiceDate = today.strftime(
+ "%Y-%m-%d"
+ )
+ line.SalesItemLineDetail = sales_item_line_detail
+ line.Amount = qty * unit_price
+ sales_receipt.Line = [line]
+
+ return sales_receipt.save(qb=self.qb_client)
+
+ def test_create(self):
+ sales_receipt = self.create_sales_receipt(
+ qty=1,
+ unit_price=100.0
+ )
+ query_sales_receipt = SalesReceipt.get(sales_receipt.Id, qb=self.qb_client)
+
+ self.assertEqual(query_sales_receipt.TotalAmt, 100.0)
+ self.assertEqual(query_sales_receipt.Line[0].Amount, 100.0)
+ self.assertEqual(query_sales_receipt.Line[0].SalesItemLineDetail['Qty'], 1)
+ self.assertEqual(query_sales_receipt.Line[0].SalesItemLineDetail['UnitPrice'], 100.0)
+
+ def test_void(self):
+ sales_receipt = self.create_sales_receipt(
+ qty=1,
+ unit_price=100.0
+ )
+ query_sales_receipt = SalesReceipt.get(sales_receipt.Id, qb=self.qb_client)
+ self.assertEqual(query_sales_receipt.TotalAmt, 100.0)
+ self.assertNotIn('Voided', query_sales_receipt.PrivateNote)
+ sales_receipt.void(qb=self.qb_client)
+ query_sales_receipt = SalesReceipt.get(sales_receipt.Id, qb=self.qb_client)
+ self.assertEqual(query_sales_receipt.TotalAmt, 0.0)
+ self.assertIn('Voided', query_sales_receipt.PrivateNote)
diff --git a/tests/unit/test_mixins.py b/tests/unit/test_mixins.py
index c3ff3ed..017df5d 100644
--- a/tests/unit/test_mixins.py
+++ b/tests/unit/test_mixins.py
@@ -4,7 +4,7 @@ import os
import unittest
from urllib.parse import quote
-from quickbooks.objects import Bill, Invoice
+from quickbooks.objects import Bill, Invoice, Payment, BillPayment
from tests.integration.test_base import QuickbooksUnitTestCase
@@ -381,12 +381,33 @@ class SendMixinTest(QuickbooksUnitTestCase):
class VoidMixinTest(QuickbooksUnitTestCase):
@patch('quickbooks.mixins.QuickBooks.post')
- def test_void(self, post):
+ def test_void_invoice(self, post):
invoice = Invoice()
invoice.Id = 2
invoice.void(qb=self.qb_client)
self.assertTrue(post.called)
+ @patch('quickbooks.mixins.QuickBooks.post')
+ def test_void_payment(self, post):
+ payment = Payment()
+ payment.Id = 2
+ payment.void(qb=self.qb_client)
+ self.assertTrue(post.called)
+
+ @patch('quickbooks.mixins.QuickBooks.post')
+ def test_void_sales_receipt(self, post):
+ sales_receipt = SalesReceipt()
+ sales_receipt.Id = 2
+ sales_receipt.void(qb=self.qb_client)
+ self.assertTrue(post.called)
+
+ @patch('quickbooks.mixins.QuickBooks.post')
+ def test_void_bill_payment(self, post):
+ bill_payment = BillPayment()
+ bill_payment.Id = 2
+ bill_payment.void(qb=self.qb_client)
+ self.assertTrue(post.called)
+
def test_delete_unsaved_exception(self):
from quickbooks.exceptions import QuickbooksException
|
Add the ability to void all voidable QB types
Currently, the [VoidMixin](https://github.com/ej2/python-quickbooks/blob/master/quickbooks/mixins.py#L127) is built to be able to void Invoices and uses the `operation=void` param. When it comes to voidable types in QB, we have
* Payment
* SalesReceipt
* BillPayment
* Invoice
Here, Invoice is actually the exception, not the rule. The first three types are voided with the `operation=update` and `include=void` params; Invoice is the only one that uses the `operation=void` param.
So currently, the VoidMixin only works for Invoices, and the [Payment type has a special `void` method](https://github.com/ej2/python-quickbooks/blob/master/quickbooks/objects/payment.py#L87) with this one-off functionality, added by [ZedObaia](https://github.com/ZedObaia) to fix his [issue on voiding Payments](https://github.com/ej2/python-quickbooks/issues/247). The other types are not voidable without custom code like the following:
```
def void_receipt(receipt_id: str) -> bool:
qb = get_client()
receipt = QBObjects.SalesReceipt()
sparse_update_data = {
"Id": receipt_id,
"SyncToken": 0,
"sparse": True
}
try:
endpoint = receipt.qbo_object_name.lower()
url = "{0}/company/{1}/{2}".format(qb.api_url, qb.company_id, endpoint)
qb.post(url, json.dumps(sparse_update_data), params={"include": "void"})
return True
except:
log.exception(f"Failed to void receipt {receipt_id}")
return False
```
I would propose, then, that the VoidMixin be updated to use the `operation=update` and `include=void` params, and that the Invoice type be the only one with its own special `void` method that uses the `operation=void` param.
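A rough sketch of that proposal (illustrative only, not the library's actual code) selects the request params per QBO object type, with Invoice as the lone `operation=void` case:

```python
# Hypothetical per-type param lookup for the proposed VoidMixin. Only Invoice
# keeps the legacy operation=void call; the other voidable types use a sparse
# update with include=void, as described above.
VOID_PARAMS = {
    "Invoice": {"operation": "void"},
    "Payment": {"operation": "update", "include": "void"},
    "SalesReceipt": {"operation": "update", "include": "void"},
    "BillPayment": {"operation": "update", "include": "void"},
}

def void_params_for(qbo_object_name):
    """Return the query params to void the given QBO object type."""
    return VOID_PARAMS.get(qbo_object_name, {"operation": "void"})
```

With a mapping like this, the mixin's `void` method only needs to look up the params by `self.qbo_object_name`, which is essentially the shape the patch above takes.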
|
0.0
|
a02ca1ba6bac7ced8e1af07e6a04d7a46818df22
|
[
"tests/unit/test_mixins.py::VoidMixinTest::test_void_bill_payment",
"tests/unit/test_mixins.py::VoidMixinTest::test_void_sales_receipt"
] |
[
"tests/unit/test_mixins.py::ToJsonMixinTest::test_to_json",
"tests/unit/test_mixins.py::FromJsonMixinTest::test_from_json",
"tests/unit/test_mixins.py::FromJsonMixinTest::test_from_json_missing_detail_object",
"tests/unit/test_mixins.py::ToDictMixinTest::test_to_dict",
"tests/unit/test_mixins.py::ListMixinTest::test_all",
"tests/unit/test_mixins.py::ListMixinTest::test_all_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_choose",
"tests/unit/test_mixins.py::ListMixinTest::test_choose_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_count",
"tests/unit/test_mixins.py::ListMixinTest::test_filter",
"tests/unit/test_mixins.py::ListMixinTest::test_filter_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_order_by",
"tests/unit/test_mixins.py::ListMixinTest::test_order_by_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_query",
"tests/unit/test_mixins.py::ListMixinTest::test_query_with_qb",
"tests/unit/test_mixins.py::ListMixinTest::test_where",
"tests/unit/test_mixins.py::ListMixinTest::test_where_start_position_0",
"tests/unit/test_mixins.py::ListMixinTest::test_where_with_qb",
"tests/unit/test_mixins.py::ReadMixinTest::test_get",
"tests/unit/test_mixins.py::ReadMixinTest::test_get_with_qb",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_create",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_create_with_qb",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_update",
"tests/unit/test_mixins.py::UpdateMixinTest::test_save_update_with_qb",
"tests/unit/test_mixins.py::DownloadPdfTest::test_download_invoice",
"tests/unit/test_mixins.py::DownloadPdfTest::test_download_missing_id",
"tests/unit/test_mixins.py::ObjectListTest::test_object_list_mixin_with_primitives",
"tests/unit/test_mixins.py::ObjectListTest::test_object_list_mixin_with_qb_objects",
"tests/unit/test_mixins.py::DeleteMixinTest::test_delete",
"tests/unit/test_mixins.py::DeleteMixinTest::test_delete_unsaved_exception",
"tests/unit/test_mixins.py::DeleteNoIdMixinTest::test_delete",
"tests/unit/test_mixins.py::SendMixinTest::test_send",
"tests/unit/test_mixins.py::SendMixinTest::test_send_with_send_to_email",
"tests/unit/test_mixins.py::VoidMixinTest::test_delete_unsaved_exception",
"tests/unit/test_mixins.py::VoidMixinTest::test_void_invoice",
"tests/unit/test_mixins.py::VoidMixinTest::test_void_payment"
] |
{
"failed_lite_validators": [
"has_hyperlinks",
"has_many_modified_files",
"has_many_hunks"
],
"has_test_patch": true,
"is_lite": false
}
|
2023-09-20 12:48:04+00:00
|
mit
| 2,085
|