diff --git a/bin/alias b/bin/alias
new file mode 100755
index 0000000000..50969046fd
--- /dev/null
+++ b/bin/alias
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -eo pipefail
+
+[ -n "$DOCKERHUB_REPO" ] && [ "$#" -eq 2 ]
+
+VERSION="$1"
+ALIAS="$2"
+
+docker login -e "$DOCKER_EMAIL" -u "$DOCKER_USER" -p "$DOCKER_PASS"
+docker tag "$DOCKERHUB_REPO:$VERSION" "$DOCKERHUB_REPO:$ALIAS"
+docker push "$DOCKERHUB_REPO:$ALIAS"
diff --git a/bin/docker-entrypoint b/bin/docker-entrypoint
index 1bed803efd..a91be66fc8 100755
--- a/bin/docker-entrypoint
+++ b/bin/docker-entrypoint
@@ -5,9 +5,13 @@ worker() {
/app/manage.py db upgrade
WORKERS_COUNT=${WORKERS_COUNT:-2}
QUEUES=${QUEUES:-queries,scheduled_queries,celery}
+ MAX_MEMORY=$(($(/usr/bin/awk '/MemTotal/ {print $2}' /proc/meminfo)/4))
echo "Starting $WORKERS_COUNT workers for queues: $QUEUES..."
- exec /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo --maxtasksperchild=10 -Ofair
+ exec /usr/local/bin/celery worker --app=redash.worker -c$WORKERS_COUNT -Q$QUEUES -linfo \
+ --max-tasks-per-child=10 \
+ --max-memory-per-child=$MAX_MEMORY \
+ -Ofair
}
scheduler() {
@@ -17,7 +21,9 @@ scheduler() {
echo "Starting scheduler and $WORKERS_COUNT workers for queues: $QUEUES..."
- exec /usr/local/bin/celery worker --app=redash.worker --beat -c$WORKERS_COUNT -Q$QUEUES -linfo --maxtasksperchild=10 -Ofair
+ exec /usr/local/bin/celery worker --app=redash.worker --beat -c$WORKERS_COUNT -Q$QUEUES -linfo \
+ --max-tasks-per-child=10 \
+ -Ofair
}
server() {
diff --git a/circle.yml b/circle.yml
index 394437a752..ea21f094fd 100644
--- a/circle.yml
+++ b/circle.yml
@@ -18,16 +18,22 @@ test:
override:
- pytest --junitxml=$CIRCLE_TEST_REPORTS/junit.xml tests/
deployment:
- latest:
+ master:
branch: master
owner: mozilla
commands:
- - ./bin/deploy "latest"
- hub_releases:
+ - ./bin/deploy "master"
+ release:
+ branch: release
+ owner: mozilla
+ commands:
+ - ./bin/deploy "rc"
+ milestone:
tag: /^m[0-9]+(\.[0-9]+)?$/
owner: mozilla
commands:
- ./bin/deploy "$CIRCLE_TAG"
+ - ./bin/alias "$CIRCLE_TAG" "latest"
general:
branches:
ignore:
diff --git a/client/app/components/dashboards/widget.js b/client/app/components/dashboards/widget.js
index ecd061464b..50eda3d21e 100644
--- a/client/app/components/dashboards/widget.js
+++ b/client/app/components/dashboards/widget.js
@@ -66,8 +66,6 @@ function DashboardWidgetCtrl($location, $uibModal, $window, Events, currentUser)
return;
}
- Events.record('delete', 'widget', this.widget.id);
-
this.widget.delete().then(() => {
if (this.deleted) {
this.deleted({});
diff --git a/client/app/components/parameters.js b/client/app/components/parameters.js
index b726f1575c..63a7abb7f2 100644
--- a/client/app/components/parameters.js
+++ b/client/app/components/parameters.js
@@ -123,7 +123,7 @@ function ParametersDirective($location, $uibModal) {
}
scope.parameters.forEach((param) => {
if (param.value !== null || param.value !== '') {
- $location.search(`p_${param.name}`, param.value);
+ $location.search(`p_${param.name}_${param.queryId}`, param.value);
}
});
}, true);
diff --git a/client/app/components/queries/query-editor.js b/client/app/components/queries/query-editor.js
index 951a90bc97..707204fc80 100644
--- a/client/app/components/queries/query-editor.js
+++ b/client/app/components/queries/query-editor.js
@@ -80,18 +80,21 @@ function queryEditor(QuerySnippet, $timeout) {
editor.getSession().setMode(newMode);
});
- $scope.$watch('schema', (newSchema, oldSchema) => {
+ $scope.$watch('autoCompleteSchema', (newSchema, oldSchema) => {
if (newSchema !== oldSchema) {
if (newSchema === undefined) {
return;
}
- const tokensCount = newSchema.reduce((totalLength, table) => totalLength + table.columns.length, 0);
- // If there are too many tokens we disable live autocomplete,
- // as it makes typing slower.
- if (tokensCount > 5000) {
+ const tokensCount =
+ newSchema.reduce((totalLength, table) => totalLength + table.columns.length, 0);
+ // If there are too many tokens or if it's requested via the UI
+ // we disable live autocomplete, as it makes typing slower.
+ if (tokensCount > 5000 || !$scope.$parent.autocompleteQuery) {
editor.setOption('enableLiveAutocompletion', false);
+ editor.setOption('enableBasicAutocompletion', false);
} else {
editor.setOption('enableLiveAutocompletion', true);
+ editor.setOption('enableBasicAutocompletion', true);
}
}
});
@@ -99,6 +102,10 @@ function queryEditor(QuerySnippet, $timeout) {
$scope.$parent.$on('angular-resizable.resizing', () => {
editor.resize();
});
+ $scope.$parent.$watch('autocompleteQuery', () => {
+ editor.setOption('enableLiveAutocompletion', $scope.$parent.autocompleteQuery);
+ editor.setOption('enableBasicAutocompletion', $scope.$parent.autocompleteQuery);
+ });
editor.focus();
},
@@ -106,31 +113,44 @@ function queryEditor(QuerySnippet, $timeout) {
const schemaCompleter = {
getCompletions(state, session, pos, prefix, callback) {
- if (prefix.length === 0 || !$scope.schema) {
+ // make a variable for the auto completion in the query editor
+ $scope.autoCompleteSchema = $scope.schema; // removeExtraSchemaInfo(
+
+ if (prefix.length === 0 || !$scope.autoCompleteSchema) {
callback(null, []);
return;
}
- if (!$scope.schema.keywords) {
+ if (!$scope.autoCompleteSchema.keywords) {
const keywords = {};
- $scope.schema.forEach((table) => {
+ $scope.autoCompleteSchema.forEach((table) => {
keywords[table.name] = 'Table';
- table.columns.forEach((c) => {
- keywords[c] = 'Column';
+ table.columns.forEach((c) => { // autoCompleteColumns
+ if (c.charAt(c.length - 1) === ')') {
+ const parensStartAt = c.indexOf('(') - 1;
+ c = c.substring(0, parensStartAt);
+ // 'const' satisfies prefer-const without a dummy reassignment
+ }
+ // remove '[P] ' for partition keys
+ if (c.charAt(0) === '[') {
+ c = c.substring(4, c.length);
+ }
+ // keywords[c] = 'Column'; // dups columns
keywords[`${table.name}.${c}`] = 'Column';
});
});
- $scope.schema.keywords = map(keywords, (v, k) => ({
- name: k,
- value: k,
- score: 0,
- meta: v,
- }));
+ $scope.autoCompleteSchema.keywords = map(keywords, (v, k) =>
+ ({
+ name: k,
+ value: k,
+ score: 0,
+ meta: v,
+ }));
}
- callback(null, $scope.schema.keywords);
+ callback(null, $scope.autoCompleteSchema.keywords);
},
};
diff --git a/client/app/components/queries/schedule-dialog.html b/client/app/components/queries/schedule-dialog.html
index f9344238a1..aca492cdfe 100644
--- a/client/app/components/queries/schedule-dialog.html
+++ b/client/app/components/queries/schedule-dialog.html
@@ -19,4 +19,7 @@
Refresh Schedule
Stop scheduling at date/time (format yyyy-MM-ddTHH:mm:ss, like 2016-12-28T14:57:00):
+
diff --git a/client/app/components/queries/schedule-dialog.js b/client/app/components/queries/schedule-dialog.js
index db6ebe0320..41c29e031c 100644
--- a/client/app/components/queries/schedule-dialog.js
+++ b/client/app/components/queries/schedule-dialog.js
@@ -114,11 +114,21 @@ function scheduleUntil() {
};
}
+function scheduleKeepResults() {
+ return {
+ restrict: 'E',
+ scope: {
+ query: '=',
+ saveQuery: '=',
+ },
+ template: '',
+ };
+}
+
const ScheduleForm = {
controller() {
this.query = this.resolve.query;
this.saveQuery = this.resolve.saveQuery;
-
if (this.query.hasDailySchedule()) {
this.refreshType = 'daily';
} else {
@@ -137,5 +147,6 @@ export default function init(ngModule) {
ngModule.directive('queryTimePicker', queryTimePicker);
ngModule.directive('queryRefreshSelect', queryRefreshSelect);
ngModule.directive('scheduleUntil', scheduleUntil);
+ ngModule.directive('scheduleKeepResults', scheduleKeepResults);
ngModule.component('scheduleDialog', ScheduleForm);
}
diff --git a/client/app/pages/admin/outdated-queries/index.js b/client/app/pages/admin/outdated-queries/index.js
index aa5a54ad5d..e1dfd1e280 100644
--- a/client/app/pages/admin/outdated-queries/index.js
+++ b/client/app/pages/admin/outdated-queries/index.js
@@ -3,8 +3,7 @@ import moment from 'moment';
import { Paginator } from '@/lib/pagination';
import template from './outdated-queries.html';
-function OutdatedQueriesCtrl($scope, Events, $http, $timeout) {
- Events.record('view', 'page', 'admin/outdated_queries');
+function OutdatedQueriesCtrl($scope, $http, $timeout) {
$scope.autoUpdate = true;
this.queries = new Paginator([], { itemsPerPage: 50 });
diff --git a/client/app/pages/admin/tasks/index.js b/client/app/pages/admin/tasks/index.js
index 53d9007ea9..bceb11e53c 100644
--- a/client/app/pages/admin/tasks/index.js
+++ b/client/app/pages/admin/tasks/index.js
@@ -3,8 +3,7 @@ import moment from 'moment';
import { Paginator } from '@/lib/pagination';
import template from './tasks.html';
-function TasksCtrl($scope, $location, $http, $timeout, Events) {
- Events.record('view', 'page', 'admin/tasks');
+function TasksCtrl($scope, $location, $http, $timeout) {
$scope.autoUpdate = true;
$scope.selectedTab = 'in_progress';
diff --git a/client/app/pages/alert/index.js b/client/app/pages/alert/index.js
index 62f89a1e9c..6f4252b11b 100644
--- a/client/app/pages/alert/index.js
+++ b/client/app/pages/alert/index.js
@@ -6,8 +6,6 @@ function AlertCtrl($routeParams, $location, $sce, toastr, currentUser, Query, Ev
if (this.alertId === 'new') {
Events.record('view', 'page', 'alerts/new');
- } else {
- Events.record('view', 'alert', this.alertId);
}
this.trustAsHtml = html => $sce.trustAsHtml(html);
diff --git a/client/app/pages/alerts-list/index.js b/client/app/pages/alerts-list/index.js
index 25cf0689a0..19869bcc5b 100644
--- a/client/app/pages/alerts-list/index.js
+++ b/client/app/pages/alerts-list/index.js
@@ -8,9 +8,7 @@ const stateClass = {
};
class AlertsListCtrl {
- constructor(Events, Alert) {
- Events.record('view', 'page', 'alerts');
-
+ constructor(Alert) {
this.showEmptyState = false;
this.showList = false;
diff --git a/client/app/pages/dashboards/dashboard.js b/client/app/pages/dashboards/dashboard.js
index 8e2d1b4259..aaa6000b15 100644
--- a/client/app/pages/dashboards/dashboard.js
+++ b/client/app/pages/dashboards/dashboard.js
@@ -179,7 +179,6 @@ function DashboardCtrl(
(dashboard) => {
this.dashboard = dashboard;
this.isDashboardOwner = currentUser.id === dashboard.user.id || currentUser.hasPermission('admin');
- Events.record('view', 'dashboard', dashboard.id);
renderDashboard(dashboard, force);
if ($location.search().edit === true) {
@@ -229,7 +228,6 @@ function DashboardCtrl(
this.archiveDashboard = () => {
const archive = () => {
- Events.record('archive', 'dashboard', this.dashboard.id);
this.dashboard.$delete();
};
diff --git a/client/app/pages/data-sources/list.js b/client/app/pages/data-sources/list.js
index 12a6b107c3..7ae3bff7aa 100644
--- a/client/app/pages/data-sources/list.js
+++ b/client/app/pages/data-sources/list.js
@@ -1,9 +1,7 @@
import settingsMenu from '@/lib/settings-menu';
import template from './list.html';
-function DataSourcesCtrl(Policy, Events, DataSource) {
- Events.record('view', 'page', 'admin/data_sources');
-
+function DataSourcesCtrl(Policy, DataSource) {
this.policy = Policy;
this.dataSources = DataSource.query();
}
diff --git a/client/app/pages/data-sources/show.js b/client/app/pages/data-sources/show.js
index 0c4b4ae15e..584aa76e96 100644
--- a/client/app/pages/data-sources/show.js
+++ b/client/app/pages/data-sources/show.js
@@ -6,9 +6,8 @@ const logger = debug('redash:http');
function DataSourceCtrl(
$scope, $route, $routeParams, $http, $location, toastr,
- currentUser, AlertDialog, Events, DataSource,
+ currentUser, AlertDialog, DataSource,
) {
- Events.record('view', 'page', 'admin/data_source');
$scope.dataSource = $route.current.locals.dataSource;
$scope.dataSourceId = $routeParams.dataSourceId;
@@ -45,8 +44,6 @@ function DataSourceCtrl(
function deleteDataSource(callback) {
const doDelete = () => {
- Events.record('delete', 'datasource', $scope.dataSource.id);
-
$scope.dataSource.$delete(() => {
toastr.success('Data source deleted successfully.');
$location.path('/data_sources/');
@@ -64,8 +61,6 @@ function DataSourceCtrl(
}
function testConnection(callback) {
- Events.record('test', 'datasource', $scope.dataSource.id);
-
DataSource.test({ id: $scope.dataSource.id }, (httpResponse) => {
if (httpResponse.ok) {
toastr.success('Success');
@@ -81,8 +76,6 @@ function DataSourceCtrl(
}
function getDataSourceVersion(callback) {
- Events.record('test', 'data_source_version', $scope.dataSource.id);
-
DataSource.version({ id: $scope.dataSource.id }, (httpResponse) => {
if (httpResponse.ok) {
const versionNumber = httpResponse.message;
diff --git a/client/app/pages/destinations/list.js b/client/app/pages/destinations/list.js
index 5e96eb2be7..84a87327d3 100644
--- a/client/app/pages/destinations/list.js
+++ b/client/app/pages/destinations/list.js
@@ -1,9 +1,7 @@
import settingsMenu from '@/lib/settings-menu';
import template from './list.html';
-function DestinationsCtrl($scope, $location, toastr, currentUser, Events, Destination) {
- Events.record('view', 'page', 'admin/destinations');
-
+function DestinationsCtrl($scope, $location, toastr, currentUser, Destination) {
$scope.destinations = Destination.query();
}
diff --git a/client/app/pages/destinations/show.js b/client/app/pages/destinations/show.js
index 795d8b77a5..bdba532ebb 100644
--- a/client/app/pages/destinations/show.js
+++ b/client/app/pages/destinations/show.js
@@ -6,9 +6,8 @@ const logger = debug('redash:http');
function DestinationCtrl(
$scope, $route, $routeParams, $http, $location, toastr,
- currentUser, AlertDialog, Events, Destination,
+ currentUser, AlertDialog, Destination,
) {
- Events.record('view', 'page', 'admin/destination');
$scope.destination = $route.current.locals.destination;
$scope.destinationId = $routeParams.destinationId;
@@ -34,8 +33,6 @@ function DestinationCtrl(
$scope.delete = () => {
const doDelete = () => {
- Events.record('delete', 'destination', $scope.destination.id);
-
$scope.destination.$delete(() => {
toastr.success('Destination deleted successfully.');
$location.path('/destinations/');
diff --git a/client/app/pages/groups/data-sources.js b/client/app/pages/groups/data-sources.js
index 1e0a94f9f3..b571981709 100644
--- a/client/app/pages/groups/data-sources.js
+++ b/client/app/pages/groups/data-sources.js
@@ -1,8 +1,7 @@
import { includes } from 'lodash';
import template from './data-sources.html';
-function GroupDataSourcesCtrl($scope, $routeParams, $http, Events, Group, DataSource) {
- Events.record('view', 'group_data_sources', $scope.groupId);
+function GroupDataSourcesCtrl($scope, $routeParams, $http, Group, DataSource) {
$scope.group = Group.get({ id: $routeParams.groupId });
$scope.dataSources = Group.dataSources({ id: $routeParams.groupId });
$scope.newDataSource = {};
diff --git a/client/app/pages/groups/list.js b/client/app/pages/groups/list.js
index 48b1480cfc..3ce9f60174 100644
--- a/client/app/pages/groups/list.js
+++ b/client/app/pages/groups/list.js
@@ -2,8 +2,7 @@ import settingsMenu from '@/lib/settings-menu';
import { Paginator } from '@/lib/pagination';
import template from './list.html';
-function GroupsCtrl($scope, $uibModal, currentUser, Events, Group) {
- Events.record('view', 'page', 'groups');
+function GroupsCtrl($scope, $uibModal, currentUser, Group) {
$scope.currentUser = currentUser;
$scope.groups = new Paginator([], { itemsPerPage: 20 });
Group.query((groups) => {
diff --git a/client/app/pages/groups/show.js b/client/app/pages/groups/show.js
index ae5b7470d3..8b6ba1d15a 100644
--- a/client/app/pages/groups/show.js
+++ b/client/app/pages/groups/show.js
@@ -1,9 +1,7 @@
import { includes } from 'lodash';
import template from './show.html';
-function GroupCtrl($scope, $routeParams, $http, currentUser, Events, Group, User) {
- Events.record('view', 'group', $scope.groupId);
-
+function GroupCtrl($scope, $routeParams, $http, currentUser, Group, User) {
$scope.currentUser = currentUser;
$scope.group = Group.get({ id: $routeParams.groupId });
$scope.members = Group.members({ id: $routeParams.groupId });
diff --git a/client/app/pages/queries-list/index.js b/client/app/pages/queries-list/index.js
index 7c12f3f6ba..622d4b3668 100644
--- a/client/app/pages/queries-list/index.js
+++ b/client/app/pages/queries-list/index.js
@@ -9,9 +9,6 @@ class QueriesListCtrl {
const page = parseInt($location.search().page || 1, 10);
this.term = $location.search().q;
- if (isString(this.term) && this.term !== '') {
- Events.record('search', 'query', '', { term: this.term });
- }
this.defaultOptions = {};
diff --git a/client/app/pages/queries/add-to-dashboard.html b/client/app/pages/queries/add-to-dashboard.html
new file mode 100644
index 0000000000..1f5e6f027a
--- /dev/null
+++ b/client/app/pages/queries/add-to-dashboard.html
@@ -0,0 +1,23 @@
+
+
diff --git a/client/app/pages/queries/add-to-dashboard.js b/client/app/pages/queries/add-to-dashboard.js
new file mode 100644
index 0000000000..292727c141
--- /dev/null
+++ b/client/app/pages/queries/add-to-dashboard.js
@@ -0,0 +1,70 @@
+import template from './add-to-dashboard.html';
+
+const AddToDashboardForm = {
+ controller($sce, Dashboard, currentUser, toastr, Query, Widget) {
+ 'ngInject';
+
+ this.query = this.resolve.query;
+ this.vis = this.resolve.vis;
+ this.saveAddToDashboard = this.resolve.saveAddToDashboard;
+ this.saveInProgress = false;
+
+ this.trustAsHtml = html => $sce.trustAsHtml(html);
+
+ this.onDashboardSelected = (dash) => {
+ // add widget to dashboard
+ this.saveInProgress = true;
+ this.widgetSize = 1;
+ this.selectedVis = null;
+ this.query = {};
+ this.selected_query = this.query.id;
+ this.type = 'visualization';
+ this.isVisualization = () => this.type === 'visualization';
+
+ const widget = new Widget({
+ visualization_id: this.vis && this.vis.id,
+ dashboard_id: dash.id,
+ options: {},
+ width: this.widgetSize,
+ type: this.type,
+ });
+
+ // (response)
+ widget.save().then(() => {
+ // (dashboard)
+ this.selectedDashboard = Dashboard.get({ slug: dash.slug }, () => {});
+ this.close();
+ }).catch(() => {
+ toastr.error('Widget can not be added');
+ }).finally(() => {
+ this.saveInProgress = false;
+ });
+ };
+
+ this.selectedDashboard = null;
+
+ this.searchDashboards = (term) => { // , limitToUsersDashboards
+ if (!term || term.length < 3) {
+ return;
+ }
+
+ Dashboard.get({
+ q: term,
+ include_drafts: true,
+ }, (results) => {
+ this.dashboards = results.results;
+ });
+ };
+ },
+ bindings: {
+ resolve: '<',
+ close: '&',
+ dismiss: '&',
+ vis: '<',
+ },
+ template,
+};
+
+export default function (ngModule) {
+ ngModule.component('addToDashboardDialog', AddToDashboardForm);
+}
diff --git a/client/app/pages/queries/query.html b/client/app/pages/queries/query.html
index f2e0f8e525..99e9f92e12 100644
--- a/client/app/pages/queries/query.html
+++ b/client/app/pages/queries/query.html
@@ -45,7 +45,7 @@
-
+
-
\ No newline at end of file
+
diff --git a/client/app/pages/users/list.js b/client/app/pages/users/list.js
index ebf449a269..9618a0e04c 100644
--- a/client/app/pages/users/list.js
+++ b/client/app/pages/users/list.js
@@ -3,9 +3,7 @@ import settingsMenu from '@/lib/settings-menu';
import { Paginator } from '@/lib/pagination';
import template from './list.html';
-function UsersCtrl(currentUser, Policy, Events, User) {
- Events.record('view', 'page', 'users');
-
+function UsersCtrl(currentUser, Policy, User) {
this.currentUser = currentUser;
this.policy = Policy;
this.users = new Paginator([], { itemsPerPage: 20 });
diff --git a/client/app/pages/users/show.js b/client/app/pages/users/show.js
index aec7abe39f..e6f17be3af 100644
--- a/client/app/pages/users/show.js
+++ b/client/app/pages/users/show.js
@@ -6,7 +6,7 @@ import './settings.less';
function UserCtrl(
$scope, $routeParams, $http, $location, toastr,
- clientConfig, currentUser, Events, User,
+ clientConfig, currentUser, User,
) {
$scope.userId = $routeParams.userId;
$scope.currentUser = currentUser;
@@ -16,7 +16,6 @@ function UserCtrl(
$scope.userId = currentUser.id;
}
- Events.record('view', 'user', $scope.userId);
$scope.canEdit = currentUser.hasPermission('admin') || currentUser.id === parseInt($scope.userId, 10);
$scope.showSettings = false;
$scope.showPasswordSettings = false;
diff --git a/client/app/services/query-result.js b/client/app/services/query-result.js
index 0f2281223c..a6bff2acb7 100644
--- a/client/app/services/query-result.js
+++ b/client/app/services/query-result.js
@@ -54,6 +54,7 @@ function addPointToSeries(point, seriesCollection, seriesName) {
function QueryResultService($resource, $timeout, $q) {
const QueryResultResource = $resource('api/query_results/:id', { id: '@id' }, { post: { method: 'POST' } });
+ const QueryResultSetResource = $resource('api/queries/:id/resultset', { id: '@id' });
const Job = $resource('api/jobs/:id', { id: '@id' });
const statuses = {
1: 'waiting',
@@ -421,6 +422,15 @@ function QueryResultService($resource, $timeout, $q) {
return queryResult;
}
+ static getResultSet(queryId) {
+ const queryResult = new QueryResult();
+
+ QueryResultSetResource.get({ id: queryId }, (response) => {
+ queryResult.update(response);
+ });
+
+ return queryResult;
+ }
loadResult(tryCount) {
QueryResultResource.get(
{ id: this.job.query_result_id },
diff --git a/client/app/services/query.js b/client/app/services/query.js
index ac9c97b719..b446e80727 100644
--- a/client/app/services/query.js
+++ b/client/app/services/query.js
@@ -112,12 +112,14 @@ class Parameters {
});
const parameterExists = p => includes(parameterNames, p.name);
- this.query.options.parameters = this.query.options.parameters.filter(parameterExists).map(p => new Parameter(p));
+ this.query.options.parameters = this.query.options.parameters
+ .filter(parameterExists)
+ .map(p => new Parameter(Object.assign({ queryId: this.query.id }, p)));
}
initFromQueryString(queryString) {
this.get().forEach((param) => {
- const queryStringName = `p_${param.name}`;
+ const queryStringName = `p_${param.name}_${this.query.id}`;
if (has(queryString, queryStringName)) {
param.value = queryString[queryStringName];
}
@@ -320,7 +322,11 @@ function QueryResource($resource, $http, $q, $location, currentUser, QueryResult
this.latest_query_data_id = null;
}
- if (this.latest_query_data && maxAge !== 0) {
+ if (this.schedule_resultset_size) {
+ if (!this.queryResult) {
+ this.queryResult = QueryResult.getResultSet(this.id);
+ }
+ } else if (this.latest_query_data && maxAge !== 0) {
if (!this.queryResult) {
this.queryResult = new QueryResult({
query_result: this.latest_query_data,
@@ -357,7 +363,7 @@ function QueryResource($resource, $http, $q, $location, currentUser, QueryResult
params += '&';
}
- params += `p_${encodeURIComponent(name)}=${encodeURIComponent(value)}`;
+ params += `p_${encodeURIComponent(name)}_${this.id}=${encodeURIComponent(value)}`;
});
}
diff --git a/client/app/visualizations/chart/chart-editor.html b/client/app/visualizations/chart/chart-editor.html
index 202b92f0a9..a98937e123 100644
--- a/client/app/visualizations/chart/chart-editor.html
+++ b/client/app/visualizations/chart/chart-editor.html
@@ -9,11 +9,14 @@
Y Axis
-
+
Series
Data Labels
+
+ Colors
@@ -193,6 +196,12 @@
Show Labels
+
+
+
+
+ How many characters should X Axis Labels be truncated at in the legend?
+
@@ -329,4 +338,35 @@
{{$index == 0 ? 'Left' : 'Right'}} Y Axis
placeholder="(auto)">
-
\ No newline at end of file
+
+
+
+ | zIndex |
+ Label |
+ Color |
+
+
+
+ |
+ |
+
+ {{name}}
+ |
+
+
+
+
+
+
+
+
+
+
+ |
+
+
+
+
+
+
diff --git a/client/app/visualizations/chart/index.js b/client/app/visualizations/chart/index.js
index 8797501220..8f09e4d123 100644
--- a/client/app/visualizations/chart/index.js
+++ b/client/app/visualizations/chart/index.js
@@ -156,19 +156,48 @@ function ChartEditor(ColorPalette, clientConfig) {
}
function refreshSeries() {
- const seriesNames = map(scope.queryResult.getChartData(scope.options.columnMapping), i => i.name);
- const existing = keys(scope.options.seriesOptions);
- each(difference(seriesNames, existing), (name) => {
- scope.options.seriesOptions[name] = {
- type: scope.options.globalSeriesType,
- yAxis: 0,
- };
- scope.form.seriesList.push(name);
- });
- each(difference(existing, seriesNames), (name) => {
- scope.form.seriesList = without(scope.form.seriesList, name);
- delete scope.options.seriesOptions[name];
- });
+ // for pie charts only get color list (x row) instead of series list (y column)
+ if (scope.options.globalSeriesType === 'pie') {
+ const seriesData = scope.queryResult.getData();
+ scope.form.colorsList = [];
+ const xColumnName = scope.form.xAxisColumn;
+ seriesData.forEach((rowOfData) => {
+ scope.form.colorsList.push(rowOfData[xColumnName]);
+ });
+
+ const colorNames = scope.form.colorsList;
+ let existing = [];
+ if (scope.options.colorOptions === undefined) {
+ existing = colorNames;
+ } else {
+ existing = keys(scope.options.colorOptions);
+ }
+ each(difference(colorNames, existing), (name) => {
+ scope.options.colorOptions[name] = {
+ type: scope.options.globalSeriesType,
+ yAxis: 0,
+ };
+ scope.form.colorsList.push(name);
+ });
+ each(difference(existing, colorNames), (name) => {
+ scope.form.colorsList = without(scope.form.colorsList, name);
+ delete scope.options.colorOptions[name];
+ });
+ } else {
+ const seriesNames = map(scope.queryResult.getChartData(scope.options.columnMapping), i => i.name);
+ const existing = keys(scope.options.seriesOptions);
+ each(difference(seriesNames, existing), (name) => {
+ scope.options.seriesOptions[name] = {
+ type: scope.options.globalSeriesType,
+ yAxis: 0,
+ };
+ scope.form.seriesList.push(name);
+ });
+ each(difference(existing, seriesNames), (name) => {
+ scope.form.seriesList = without(scope.form.seriesList, name);
+ delete scope.options.seriesOptions[name];
+ });
+ }
}
function setColumnRole(role, column) {
@@ -200,6 +229,8 @@ function ChartEditor(ColorPalette, clientConfig) {
yAxisColumns: [],
seriesList: sortBy(keys(scope.options.seriesOptions), name =>
scope.options.seriesOptions[name].zIndex),
+ colorsList: sortBy(keys(scope.options.colorOptions), name =>
+ scope.options.colorOptions[name].zIndex),
};
scope.$watchCollection('form.seriesList', (value) => {
@@ -209,7 +240,6 @@ function ChartEditor(ColorPalette, clientConfig) {
});
});
-
scope.$watchCollection('form.yAxisColumns', (value, old) => {
each(old, unsetColumn);
each(value, partial(setColumnRole, 'y'));
@@ -254,6 +284,19 @@ function ChartEditor(ColorPalette, clientConfig) {
scope.options.legend = { enabled: true };
}
+ scope.$watch('options.globalSeriesType', (newType, oldType) => {
+ const defaultXAxisLength = 10;
+ if (!has(scope.options, 'xAxisLabelLength')) {
+ scope.options.xAxisLabelLength = defaultXAxisLength;
+ }
+ if (oldType !== newType) {
+ scope.options.xAxisLabelLength = defaultXAxisLength;
+ if (newType === 'pie') {
+ scope.options.xAxisLabelLength = 300;
+ }
+ }
+ }, true);
+
if (scope.columnNames) {
each(scope.options.columnMapping, (value, key) => {
if (scope.columnNames.length > 0 && !includes(scope.columnNames, key)) {
diff --git a/client/app/visualizations/chart/plotly/utils.js b/client/app/visualizations/chart/plotly/utils.js
index c118f49f75..a4a1fc4401 100644
--- a/client/app/visualizations/chart/plotly/utils.js
+++ b/client/app/visualizations/chart/plotly/utils.js
@@ -208,6 +208,19 @@ function getUnifiedXAxisValues(seriesList, sorted) {
return sorted ? sortBy(result, identity) : result;
}
+const DEFAULT_XAXIS_LABEL_LENGTH = 300;
+
+// We only truncate category x-axis labels because the other types
+// are correctly formatted by Plotly.
+function truncateCategoryAxis(oldXLabel, options) {
+ const xAxisLabelLength = parseInt(options.xAxisLabelLength, 10) || DEFAULT_XAXIS_LABEL_LENGTH;
+
+ if (options && options.xAxis && options.xAxis.type === 'category') {
+ return String(oldXLabel).substr(0, xAxisLabelLength);
+ }
+ return oldXLabel;
+}
+
function preparePieData(seriesList, options) {
const {
cellWidth, cellHeight, xPadding, yPadding, cellsInRow, hasX,
@@ -250,12 +263,21 @@ function preparePieData(seriesList, options) {
});
});
+ const colorPalette = ColorPaletteArray.slice();
return {
values: map(serie.data, i => i.y),
- labels: map(serie.data, row => (hasX ? normalizeValue(row.x) : `Slice ${index}`)),
+ labels: map(serie.data, (row, rowIdx) => {
+
+ const rowX = hasX ? truncateCategoryAxis(normalizeValue(row.x), options) : `Slice ${index}`;
+ const rowOpts = options.seriesOptions[rowX];
+ if (rowOpts) {
+ colorPalette[rowIdx] = rowOpts.color;
+ }
+ return rowX;
+ }),
type: 'pie',
hole: 0.4,
- marker: { colors: ColorPaletteArray },
+ marker: { colors: colorPalette },
hoverinfo,
text: [],
textinfo: options.showDataLabels ? 'percent' : 'none',
@@ -305,7 +327,7 @@ function prepareChartData(seriesList, options) {
const yValues = [];
const yErrorValues = [];
each(data, (row) => {
- const x = normalizeValue(row.x);
+ const x = truncateCategoryAxis(normalizeValue(row.x), options);
const y = normalizeValue(row.y);
const yError = normalizeValue(row.yError);
const size = normalizeValue(row.size);
diff --git a/docker-compose.yml b/docker-compose.yml
index b454410bff..16d6f13582 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -41,3 +41,13 @@ services:
# tests.
command: "postgres -c fsync=off -c full_page_writes=off -c synchronous_commit=OFF"
restart: unless-stopped
+ flower:
+ image: mher/flower:latest
+ command: flower
+ environment:
+ CELERY_BROKER_URL: redis://redis:6379/0
+ CELERY_RESULT_BACKEND: redis://redis:6379/0
+ ports:
+ - "5555:5555"
+ links:
+ - redis
diff --git a/migrations/versions/15041b7085fe_.py b/migrations/versions/15041b7085fe_.py
new file mode 100644
index 0000000000..fcb10aa78f
--- /dev/null
+++ b/migrations/versions/15041b7085fe_.py
@@ -0,0 +1,24 @@
+"""empty message
+
+Revision ID: 15041b7085fe
+Revises: f9571a5ab4f3, 969126bd800f
+Create Date: 2018-02-14 17:52:17.138127
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '15041b7085fe'
+down_revision = ('f9571a5ab4f3', '969126bd800f')
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/migrations/versions/2ba47e9812b1_.py b/migrations/versions/2ba47e9812b1_.py
new file mode 100644
index 0000000000..93d0f59268
--- /dev/null
+++ b/migrations/versions/2ba47e9812b1_.py
@@ -0,0 +1,24 @@
+"""empty message
+
+Revision ID: 2ba47e9812b1
+Revises: 71477dadd6ef, 9d7678c47452
+Create Date: 2018-07-25 16:09:54.769289
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '2ba47e9812b1'
+down_revision = ('71477dadd6ef', '9d7678c47452', )
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/migrations/versions/40384fa03dd1_.py b/migrations/versions/40384fa03dd1_.py
new file mode 100644
index 0000000000..f2c53711c0
--- /dev/null
+++ b/migrations/versions/40384fa03dd1_.py
@@ -0,0 +1,40 @@
+"""Upgrade 'data_scanned' column to form used in upstream
+
+Revision ID: 40384fa03dd1
+Revises: 58f810489c47
+Create Date: 2018-01-18 18:44:04.917081
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects.postgresql import JSONB
+from sqlalchemy.sql.expression import func, cast
+
+# revision identifiers, used by Alembic.
+revision = '40384fa03dd1'
+down_revision = 'fbc0849e2674'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ qr = sa.sql.table('query_results',
+ sa.sql.column('data_scanned', sa.String),
+ sa.sql.column('data', sa.String))
+ op.execute(
+ qr.update()
+ .where(qr.c.data_scanned != '')
+ .where(qr.c.data_scanned != 'error')
+ .where(qr.c.data_scanned != 'N/A')
+ .values(data=cast(
+ func.jsonb_set(cast(qr.c.data, JSONB),
+ '{metadata}',
+ cast('{"data_scanned": ' +
+ qr.c.data_scanned + '}',
+ JSONB)),
+ sa.String)))
+ op.drop_column('query_results', 'data_scanned')
+
+
+def downgrade():
+ op.add_column('query_results', sa.Column('data_scanned', sa.String(length=255), nullable=True))
diff --git a/migrations/versions/58f810489c47_.py b/migrations/versions/58f810489c47_.py
new file mode 100644
index 0000000000..1ed4190288
--- /dev/null
+++ b/migrations/versions/58f810489c47_.py
@@ -0,0 +1,28 @@
+"""add 'data_scanned' column to query_results
+
+Revision ID: 58f810489c47
+Revises: eb2f788f997e
+Create Date: 2017-06-25 21:24:54.942119
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '58f810489c47'
+down_revision = 'eb2f788f997e'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.add_column('query_results', sa.Column('data_scanned', sa.String(length=255), nullable=True))
+ # ### end Alembic commands ###
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column('query_results', 'data_scanned')
+ # ### end Alembic commands ###
diff --git a/migrations/versions/9d7678c47452_.py b/migrations/versions/9d7678c47452_.py
new file mode 100644
index 0000000000..d351153c87
--- /dev/null
+++ b/migrations/versions/9d7678c47452_.py
@@ -0,0 +1,34 @@
+"""Incremental query results aggregation
+
+Revision ID: 9d7678c47452
+Revises: 15041b7085fe
+Create Date: 2018-03-08 04:36:12.802199
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '9d7678c47452'
+down_revision = '15041b7085fe'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ op.create_table('query_resultsets',
+ sa.Column('query_id', sa.Integer(), nullable=False),
+ sa.Column('result_id', sa.Integer(), nullable=False),
+ sa.ForeignKeyConstraint(['query_id'], ['queries.id'], ),
+ sa.ForeignKeyConstraint(['result_id'], ['query_results.id'], ),
+ sa.PrimaryKeyConstraint('query_id', 'result_id')
+ )
+ op.add_column(u'queries', sa.Column('schedule_resultset_size', sa.Integer(), nullable=True))
+
+
+def downgrade():
+ # ### commands auto generated by Alembic - please adjust! ###
+ op.drop_column(u'queries', 'schedule_resultset_size')
+ op.drop_table('query_resultsets')
+ # ### end Alembic commands ###
diff --git a/migrations/versions/f9571a5ab4f3_.py b/migrations/versions/f9571a5ab4f3_.py
new file mode 100644
index 0000000000..da1ba02d6d
--- /dev/null
+++ b/migrations/versions/f9571a5ab4f3_.py
@@ -0,0 +1,28 @@
+"""Rename 'image_url' to 'profile_image_url'
+
+ a revision was changed after we pulled it from upstream in m12, so it had to
+ be fixed here.
+
+
+Revision ID: f9571a5ab4f3
+Revises: 40384fa03dd1
+Create Date: 2018-01-18 18:04:07.943843
+"""
+from alembic import op
+
+
+# revision identifiers, used by Alembic.
+revision = 'f9571a5ab4f3'
+down_revision = '40384fa03dd1'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # Upstream changed the column name in migration revision 7671dca4e604 --
+ # see git revision 62e5e3892603502c5f3a6da277c33c73510b8819
+ op.alter_column('users', 'image_url', new_column_name='profile_image_url')
+
+
+def downgrade():
+ op.alter_column('users', 'profile_image_url', new_column_name='image_url')
diff --git a/migrations/versions/fbc0849e2674_.py b/migrations/versions/fbc0849e2674_.py
new file mode 100644
index 0000000000..6195141496
--- /dev/null
+++ b/migrations/versions/fbc0849e2674_.py
@@ -0,0 +1,26 @@
+"""
+Merge upstream fulltext search
+
+This formerly merged the fulltext search changes (6b5be7e0a0ef, 5ec5c84ba61e)
+with upstream's 7671dca4e604 - but then those changes moved in the revision
+graph to be direct descendants of that upstream revision, so the merge point
+has been moved.
+
+Revision ID: fbc0849e2674
+Revises: 6b5be7e0a0ef, 58f810489c47
+Create Date: 2017-12-12 04:45:34.360587
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'fbc0849e2674'
+down_revision = ('6b5be7e0a0ef', '58f810489c47')
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ pass
+
+
+def downgrade():
+ pass
diff --git a/redash/__init__.py b/redash/__init__.py
index bd1793b65c..de16e50a0f 100644
--- a/redash/__init__.py
+++ b/redash/__init__.py
@@ -128,6 +128,11 @@ def create_app(load_admin=True):
app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI
app.config.update(settings.all_settings())
+ def set_response_headers(response):
+ response.headers['X-Content-Type-Options'] = 'nosniff'
+ return response
+
+ app.after_request(set_response_headers)
provision_app(app)
db.init_app(app)
migrate.init_app(app, db)
diff --git a/redash/authentication/remote_user_auth.py b/redash/authentication/remote_user_auth.py
index 9a78da77ab..f159eb7dc9 100644
--- a/redash/authentication/remote_user_auth.py
+++ b/redash/authentication/remote_user_auth.py
@@ -30,6 +30,21 @@ def login(org_slug=None):
logger.error("Cannot use remote user for login when it's not provided in the request (looked in headers['" + settings.REMOTE_USER_HEADER + "'])")
return redirect(url_for('redash.index', next=next_path, org_slug=org_slug))
+ # Check if there is a header of user groups and if yes
+ # check it against a list of allowed user groups from the settings
+ if settings.REMOTE_GROUPS_ENABLED:
+ remote_groups = settings.set_from_string(
+ request.headers.get(settings.REMOTE_GROUPS_HEADER) or ''
+ )
+ allowed_groups = settings.REMOTE_GROUPS_ALLOWED
+ if not allowed_groups.intersection(remote_groups):
+ logger.error(
+ "User groups provided in the %s header are not "
+ "matching the allowed groups.",
+ settings.REMOTE_GROUPS_HEADER
+ )
+ return redirect(url_for('redash.index', next=next_path))
+
logger.info("Logging in " + email + " via remote user")
user = create_and_login_user(current_org, email, email)
diff --git a/redash/handlers/admin.py b/redash/handlers/admin.py
index 919dc91924..51b0c0ca05 100644
--- a/redash/handlers/admin.py
+++ b/redash/handlers/admin.py
@@ -1,10 +1,12 @@
import json
+import time
from flask import request
-from flask_login import login_required
+from flask_login import current_user, login_required
from redash import models, redis_connection
+from redash.authentication import current_org
from redash.handlers import routes
-from redash.handlers.base import json_response
+from redash.handlers.base import json_response, record_event
from redash.permissions import require_super_admin
from redash.tasks.queries import QueryTaskTracker
@@ -23,6 +25,13 @@ def outdated_queries():
else:
outdated_queries = []
+ record_event(current_org, current_user, {
+ 'action': 'view',
+ 'object_type': 'api_call',
+ 'object_id': 'admin/outdated_queries',
+ 'timestamp': int(time.time()),
+ })
+
return json_response(
dict(queries=[q.to_dict(with_stats=True, with_last_modified_by=False)
for q in outdated_queries],
@@ -41,6 +50,12 @@ def queries_tasks():
waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST, limit=waiting_limit)
in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST, limit=progress_limit)
done = QueryTaskTracker.all(QueryTaskTracker.DONE_LIST, limit=done_limit)
+ record_event(current_org, current_user, {
+ 'action': 'view',
+ 'object_type': 'api_call',
+ 'object_id': 'admin/tasks',
+ 'timestamp': int(time.time()),
+ })
response = {
'waiting': [t.data for t in waiting if t is not None],
diff --git a/redash/handlers/alerts.py b/redash/handlers/alerts.py
index 569e5e2753..f5bfc0e13c 100644
--- a/redash/handlers/alerts.py
+++ b/redash/handlers/alerts.py
@@ -15,7 +15,14 @@ class AlertResource(BaseResource):
def get(self, alert_id):
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
+ self.record_event({
+ 'action': 'view',
+ 'timestamp': int(time.time()),
+ 'object_id': alert.id,
+ 'object_type': 'alert'
+ })
return serialize_alert(alert)
+
def post(self, alert_id):
req = request.get_json(True)
diff --git a/redash/handlers/api.py b/redash/handlers/api.py
index fd6fe5fd15..5e7eb483f9 100644
--- a/redash/handlers/api.py
+++ b/redash/handlers/api.py
@@ -10,7 +10,7 @@
from redash.handlers.data_sources import DataSourceTypeListResource, DataSourceListResource, DataSourceSchemaResource, DataSourceResource, DataSourcePauseResource, DataSourceTestResource, DataSourceVersionResource
from redash.handlers.events import EventsResource
from redash.handlers.queries import QueryForkResource, QueryRefreshResource, QueryListResource, QueryRecentResource, QuerySearchResource, QueryResource, MyQueriesResource, QueryVersionListResource, ChangeResource
-from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource
+from redash.handlers.query_results import QueryResultListResource, QueryResultResource, JobResource, QueryResultSetResource
from redash.handlers.users import UserResource, UserListResource, UserInviteResource, UserResetPasswordResource, UserDisableResource
from redash.handlers.visualizations import VisualizationListResource
from redash.handlers.visualizations import VisualizationResource
@@ -85,6 +85,7 @@ def json_representation(data, code, headers=None):
api.add_org_resource(QueryRefreshResource, '/api/queries//refresh', endpoint='query_refresh')
api.add_org_resource(QueryResource, '/api/queries/', endpoint='query')
api.add_org_resource(QueryForkResource, '/api/queries//fork', endpoint='query_fork')
+api.add_org_resource(QueryResultSetResource, '/api/queries//resultset', endpoint='query_aggregate_results')
api.add_org_resource(QueryVersionListResource, '/api/queries//version', endpoint='query_versions')
api.add_org_resource(ChangeResource, '/api/changes/', endpoint='changes')
diff --git a/redash/handlers/dashboards.py b/redash/handlers/dashboards.py
index 9eb331e6af..568687c644 100644
--- a/redash/handlers/dashboards.py
+++ b/redash/handlers/dashboards.py
@@ -24,7 +24,7 @@ def get(self):
search_term = request.args.get('q')
if search_term:
- results = models.Dashboard.search(self.current_org, self.current_user.group_ids, self.current_user.id, search_term)
+ results = models.Dashboard.search(self.current_org, self.current_user.group_ids, self.current_user.id, search_term, 'include_drafts' in request.args)
else:
results = models.Dashboard.all(self.current_org, self.current_user.group_ids, self.current_user.id)
@@ -103,6 +103,12 @@ def get(self, dashboard_slug=None):
response['can_edit'] = can_modify(dashboard, self.current_user)
+ self.record_event({
+ 'action': 'view',
+ 'object_id': dashboard.id,
+ 'object_type': 'dashboard',
+ })
+
return response
@require_permission('edit_dashboard')
@@ -146,11 +152,18 @@ def post(self, dashboard_slug):
try:
models.db.session.commit()
except StaleDataError:
+ models.db.session.rollback()
abort(409)
except IntegrityError:
+ models.db.session.rollback()
abort(400)
result = serialize_dashboard(dashboard, with_widgets=True, user=self.current_user)
+ self.record_event({
+ 'action': 'edit',
+ 'object_id': dashboard.id,
+ 'object_type': 'dashboard',
+ })
return result
@require_permission('edit_dashboard')
@@ -168,6 +181,11 @@ def delete(self, dashboard_slug):
models.db.session.add(dashboard)
d = serialize_dashboard(dashboard, with_widgets=True, user=self.current_user)
models.db.session.commit()
+ self.record_event({
+ 'action': 'archive',
+ 'object_id': dashboard.id,
+ 'object_type': 'dashboard',
+ })
return d
diff --git a/redash/handlers/data_sources.py b/redash/handlers/data_sources.py
index cfb7a03c24..60de6e04d7 100644
--- a/redash/handlers/data_sources.py
+++ b/redash/handlers/data_sources.py
@@ -25,7 +25,13 @@ class DataSourceResource(BaseResource):
@require_admin
def get(self, data_source_id):
data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
- return data_source.to_dict(all=True)
+ ds = data_source.to_dict(all=True)
+ self.record_event({
+ 'action': 'view',
+ 'object_id': data_source.id,
+ 'object_type': 'data_source',
+ })
+ return ds
@require_admin
def post(self, data_source_id):
@@ -48,6 +54,7 @@ def post(self, data_source_id):
try:
models.db.session.commit()
except IntegrityError as e:
+ models.db.session.rollback()
if req['name'] in e.message:
abort(400, message="Data source with the name {} already exists.".format(req['name']))
@@ -59,6 +66,11 @@ def post(self, data_source_id):
def delete(self, data_source_id):
data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
data_source.delete()
+ self.record_event({
+ 'action': 'delete',
+ 'object_id': data_source_id,
+ 'object_type': 'datasource',
+ })
return make_response('', 204)
@@ -83,6 +95,11 @@ def get(self):
except AttributeError:
logging.exception("Error with DataSource#to_dict (data source id: %d)", ds.id)
+ self.record_event({
+ 'action': 'view',
+ 'object_id': 'admin/data_sources',
+ 'object_type': 'api_call',
+ })
return sorted(response.values(), key=lambda d: d['name'].lower())
@require_admin
@@ -111,6 +128,7 @@ def post(self):
models.db.session.commit()
except IntegrityError as e:
+ models.db.session.rollback()
if req['name'] in e.message:
abort(400, message="Data source with the name {} already exists.".format(req['name']))
@@ -186,6 +204,12 @@ class DataSourceTestResource(BaseResource):
def post(self, data_source_id):
data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
+ self.record_event({
+ 'action': 'test',
+ 'object_id': data_source_id,
+ 'object_type': 'datasource',
+ })
+
try:
data_source.query_runner.test_connection()
except Exception as e:
@@ -197,6 +221,11 @@ class DataSourceVersionResource(BaseResource):
def get(self, data_source_id):
data_source = get_object_or_404(models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
require_access(data_source.groups, self.current_user, view_only)
+ self.record_event({
+            'action': 'view',
+ 'object_id': data_source_id,
+ 'object_type': 'data_source_version',
+ })
try:
version_info = data_source.query_runner.get_data_source_version()
except Exception as e:
diff --git a/redash/handlers/destinations.py b/redash/handlers/destinations.py
index c1895b7321..254e51f078 100644
--- a/redash/handlers/destinations.py
+++ b/redash/handlers/destinations.py
@@ -19,7 +19,13 @@ class DestinationResource(BaseResource):
@require_admin
def get(self, destination_id):
destination = models.NotificationDestination.get_by_id_and_org(destination_id, self.current_org)
- return destination.to_dict(all=True)
+ d = destination.to_dict(all=True)
+ self.record_event({
+ 'action': 'view',
+ 'object_id': destination_id,
+ 'object_type': 'destination'
+ })
+ return d
@require_admin
def post(self, destination_id):
@@ -48,6 +54,12 @@ def delete(self, destination_id):
models.db.session.delete(destination)
models.db.session.commit()
+ self.record_event({
+ 'action': 'delete',
+ 'object_id': destination_id,
+ 'object_type': 'destination',
+ })
+
return make_response('', 204)
@@ -63,6 +75,12 @@ def get(self):
d = ds.to_dict()
response[ds.id] = d
+ self.record_event({
+ 'action': 'view',
+ 'object_id': 'admin/destinations',
+ 'object_type': 'api_call',
+ })
+
return response.values()
@require_admin
diff --git a/redash/handlers/groups.py b/redash/handlers/groups.py
index 7790044468..ba72346b10 100644
--- a/redash/handlers/groups.py
+++ b/redash/handlers/groups.py
@@ -30,6 +30,12 @@ def get(self):
groups = models.Group.query.filter(
models.Group.id.in_(self.current_user.group_ids))
+ self.record_event({
+ 'action': 'view',
+ 'object_id': 'groups',
+ 'object_type': 'api_call',
+ })
+
return [g.to_dict() for g in groups]
@@ -59,6 +65,12 @@ def get(self, group_id):
group = models.Group.get_by_id_and_org(group_id, self.current_org)
+ self.record_event({
+ 'action': 'view',
+ 'object_id': group_id,
+ 'object_type': 'group',
+ })
+
return group.to_dict()
@require_admin
@@ -154,6 +166,12 @@ def get(self, group_id):
data_sources = (models.DataSource.query
.join(models.DataSourceGroup)
.filter(models.DataSourceGroup.group == group))
+
+ self.record_event({
+ 'action': 'view',
+ 'object_id': group_id,
+ 'object_type': 'group_data_sources',
+ })
return [ds.to_dict(with_permissions_for=group) for ds in data_sources]
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
index a9fc34ae46..35b8f02fd5 100644
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -49,6 +49,11 @@ def get(self):
return []
include_drafts = request.args.get('include_drafts') is not None
+ self.record_event({
+ 'action': 'search',
+ 'object_id': term,
+ 'object_type': 'query',
+ })
queries = models.Query.search(term, self.current_user.group_ids, include_drafts=include_drafts, limit=None)
queries = filter_by_tags(queries, models.Query.tags)
@@ -81,6 +86,7 @@ def post(self):
+            if q.schedule_resultset_size > 0:
+ q.query_results.append(query_result)
query_ids = [q.id for q in queries]
logging.info("Updated %s queries with result (%s).", len(query_ids), query_hash)
@@ -880,6 +887,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
data_source = db.relationship(DataSource, backref='queries')
latest_query_data_id = Column(db.Integer, db.ForeignKey("query_results.id"), nullable=True)
latest_query_data = db.relationship(QueryResult)
+ query_results = db.relationship("QueryResult", secondary="query_resultsets")
name = Column(db.String(255))
description = Column(db.String(4096), nullable=True)
query_text = Column("query", db.Text)
@@ -895,6 +903,7 @@ class Query(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model):
schedule = Column(db.String(10), nullable=True)
schedule_failures = Column(db.Integer, default=0)
schedule_until = Column(db.DateTime(True), nullable=True)
+ schedule_resultset_size = Column(db.Integer, nullable=True)
visualizations = db.relationship("Visualization", cascade="all, delete-orphan")
options = Column(MutableDict.as_mutable(PseudoJSON), default={})
search_vector = Column(TSVectorType('id', 'name', 'description', 'query',
@@ -954,13 +963,13 @@ def all_queries(cls, group_ids, user_id=None, drafts=False):
q = q.filter(or_(Query.is_draft == False, Query.user_id == user_id))
return q
-
+
@classmethod
def favorites(cls, user, base_query=None):
if base_query == None:
base_query = cls.all_queries(user.group_ids, user.id, drafts=True)
return base_query.join((Favorite, and_(Favorite.object_type==u'Query', Favorite.object_id==Query.id))).filter(Favorite.user_id==user.id)
-
+
@classmethod
def all_tags(cls, user, include_drafts=False):
where = cls.is_archived == False
@@ -1009,6 +1018,37 @@ def outdated_queries(cls):
return outdated_queries.values()
+ @classmethod
+ def delete_stale_resultsets(cls):
+ delete_count = 0
+ texts = [c[0] for c in db.session.query(Query.query_text)
+ .filter(Query.schedule_resultset_size != None).distinct()]
+ for text in texts:
+ queries = (Query.query.filter(Query.query_text == text,
+ Query.schedule_resultset_size != None)
+ .order_by(Query.schedule_resultset_size.desc()))
+ # Multiple queries with the same text may request multiple result sets
+ # be kept. We start with the one that keeps the most, and delete both
+ # the unneeded bridge rows and result sets.
+ first_query = queries.first()
+ if first_query is not None and first_query.schedule_resultset_size:
+ resultsets = QueryResultSet.query.filter(QueryResultSet.query_rel == first_query).order_by(QueryResultSet.result_id)
+ resultset_count = resultsets.count()
+ if resultset_count > first_query.schedule_resultset_size:
+ n_to_delete = resultset_count - first_query.schedule_resultset_size
+ r_ids = [r.result_id for r in resultsets][:n_to_delete]
+ QueryResultSet.query.filter(QueryResultSet.result_id.in_(r_ids)).delete(synchronize_session=False)
+ delete_count += QueryResult.query.filter(QueryResult.id.in_(r_ids)).delete(synchronize_session=False)
+ # By this point there are no stale result sets left.
+ # Delete unneeded bridge rows for the remaining queries.
+ for q in queries[1:]:
+ resultsets = db.session.query(QueryResultSet.result_id).filter(QueryResultSet.query_rel == q).order_by(QueryResultSet.result_id)
+ n_to_delete = resultsets.count() - q.schedule_resultset_size
+ if n_to_delete > 0:
+ stale_r = QueryResultSet.query.filter(QueryResultSet.result_id.in_(resultsets.limit(n_to_delete).subquery()))
+ stale_r.delete(synchronize_session=False)
+ return delete_count
+
@classmethod
def search(cls, term, group_ids, include_drafts=False, limit=20):
where = cls.is_archived == False
@@ -1096,6 +1136,16 @@ def __repr__(self):
         return '<Query %s: "%s">' % (self.id, self.name or 'untitled')
+class QueryResultSet(db.Model):
+ query_id = Column(db.Integer, db.ForeignKey("queries.id"),
+ primary_key=True)
+ query_rel = db.relationship(Query)
+ result_id = Column(db.Integer, db.ForeignKey("query_results.id"),
+ primary_key=True)
+ result = db.relationship(QueryResult)
+ __tablename__ = 'query_resultsets'
+
+
@vectorizer(db.Integer)
def integer_vectorizer(column):
return db.func.cast(column, db.Text)
@@ -1343,7 +1393,7 @@ class Dashboard(ChangeTrackingMixin, TimestampMixin, BelongsToOrgMixin, db.Model
}
@classmethod
- def all(cls, org, group_ids, user_id):
+ def all(cls, org, group_ids, user_id, include_drafts=False):
query = (
Dashboard.query
.options(joinedload(Dashboard.user))
@@ -1359,14 +1409,14 @@ def all(cls, org, group_ids, user_id):
Dashboard.org == org)
.distinct())
- query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False))
+        query = query.filter(or_(Dashboard.user_id == user_id, Dashboard.is_draft == False, Dashboard.is_draft == include_drafts))
return query
@classmethod
- def search(cls, org, groups_ids, user_id, search_term):
+ def search(cls, org, groups_ids, user_id, search_term, include_drafts):
# TODO: switch to FTS
- return cls.all(org, groups_ids, user_id).filter(cls.name.ilike('%{}%'.format(search_term)))
+ return cls.all(org, groups_ids, user_id, include_drafts).filter(cls.name.ilike('%{}%'.format(search_term)))
@classmethod
def all_tags(cls, org, user):
diff --git a/redash/monitor.py b/redash/monitor.py
index f1f241eb26..ced9b42920 100644
--- a/redash/monitor.py
+++ b/redash/monitor.py
@@ -1,3 +1,4 @@
+import json
from redash import redis_connection, models, __version__, settings
@@ -14,6 +15,7 @@ def get_object_counts():
status['unused_query_results_count'] = models.QueryResult.unused().count()
status['dashboards_count'] = models.Dashboard.query.count()
status['widgets_count'] = models.Widget.query.count()
+ status['data_sources'] = json.loads(redis_connection.get('data_sources:health') or '{}')
return status
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py
index c839d92493..e88ec28ddf 100644
--- a/redash/query_runner/__init__.py
+++ b/redash/query_runner/__init__.py
@@ -1,14 +1,16 @@
-import sys
import logging
import json
+import sys
+
+import requests
-from collections import OrderedDict
from redash import settings
logger = logging.getLogger(__name__)
__all__ = [
'BaseQueryRunner',
+ 'BaseHTTPQueryRunner',
'InterruptException',
'BaseSQLQueryRunner',
'TYPE_DATETIME',
@@ -90,7 +92,7 @@ def get_data_source_version(self):
version = json.loads(data)['rows'][0]['version']
except KeyError as e:
raise Exception(e)
-
+
if self.data_source_version_post_process == "split by space take second":
version = version.split(" ")[1]
elif self.data_source_version_post_process == "split by space take last":
@@ -100,10 +102,12 @@ def get_data_source_version(self):
return version
- def test_connection(self):
+ def test_connection(self, custom_query_text=None):
if self.noop_query is None:
raise NotImplementedError()
- data, error = self.run_query(self.noop_query, None)
+
+ query_text = custom_query_text or self.noop_query
+ data, error = self.run_query(query_text, None)
if error is not None:
raise Exception(error)
@@ -169,6 +173,107 @@ def _get_tables_stats(self, tables_dict):
tables_dict[t]['size'] = res[0]['cnt']
+class BaseHTTPQueryRunner(BaseQueryRunner):
+ response_error = "Endpoint returned unexpected status code"
+ requires_authentication = False
+ url_title = 'URL base path'
+ username_title = 'HTTP Basic Auth Username'
+ password_title = 'HTTP Basic Auth Password'
+
+ @classmethod
+ def configuration_schema(cls):
+ schema = {
+ 'type': 'object',
+ 'properties': {
+ 'url': {
+ 'type': 'string',
+ 'title': cls.url_title,
+ },
+ 'username': {
+ 'type': 'string',
+ 'title': cls.username_title,
+ },
+ 'password': {
+ 'type': 'string',
+ 'title': cls.password_title,
+ },
+ "doc_url": {
+ "type": "string",
+ "title": "Documentation URL",
+ "default": cls.default_doc_url,
+ },
+ "toggle_table_string": {
+ "type": "string",
+ "title": "Toggle Table String",
+ "default": "_v",
+ "info": (
+ "This string will be used to toggle visibility of "
+ "tables in the schema browser when editing a query "
+ "in order to remove non-useful tables from sight."
+ ),
+ }
+ },
+ 'required': ['url'],
+ 'secret': ['password']
+ }
+ if cls.requires_authentication:
+ schema['required'] += ['username', 'password']
+ return schema
+
+ def get_auth(self):
+ username = self.configuration.get('username')
+ password = self.configuration.get('password')
+ if username and password:
+ return (username, password)
+ if self.requires_authentication:
+ raise ValueError("Username and Password required")
+ else:
+ return None
+
+ def get_response(self, url, auth=None, **kwargs):
+ # Get authentication values if not given
+ if auth is None:
+ auth = self.get_auth()
+
+ # Then call requests to get the response from the given endpoint
+ # URL optionally, with the additional requests parameters.
+ error = None
+ response = None
+ try:
+ response = requests.get(url, auth=auth, **kwargs)
+ # Raise a requests HTTP exception with the appropriate reason
+ # for 4xx and 5xx response status codes which is later caught
+ # and passed back.
+ response.raise_for_status()
+
+ # Any other responses (e.g. 2xx and 3xx):
+ if response.status_code != 200:
+ error = '{} ({}).'.format(
+ self.response_error,
+ response.status_code,
+ )
+
+ except requests.HTTPError as exc:
+ logger.exception(exc)
+ error = (
+ "Failed to execute query. "
+ "Return Code: {} Reason: {}".format(
+ response.status_code,
+ response.text
+ )
+ )
+ except requests.RequestException as exc:
+ # Catch all other requests exceptions and return the error.
+ logger.exception(exc)
+ error = str(exc)
+ except Exception as exc:
+ # Catch any other exceptions, log it and reraise it.
+ logger.exception(exc)
+ raise sys.exc_info()[1], None, sys.exc_info()[2]
+
+ return response, error
+
+
query_runners = {}
diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py
index 6437af413d..2f3bcf2b45 100644
--- a/redash/query_runner/athena.py
+++ b/redash/query_runner/athena.py
@@ -156,9 +156,10 @@ def get_schema(self, get_stats=False):
schema = {}
query = """
- SELECT table_schema, table_name, column_name, data_type as column_type
+ SELECT table_schema, table_name, column_name, data_type as column_type, comment as extra_info
FROM information_schema.columns
WHERE table_schema NOT IN ('information_schema')
+ ORDER BY 1, 5 DESC
"""
results, error = self.run_query(query, None)
@@ -170,7 +171,16 @@ def get_schema(self, get_stats=False):
table_name = '{0}.{1}'.format(row['table_schema'], row['table_name'])
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
- schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
+
+ if row['extra_info'] == 'Partition Key':
+ schema[table_name]['columns'].append('[P] ' + row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'] == 'integer' or row['column_type'] == 'varchar' or row['column_type'] == 'timestamp' or row['column_type'] == 'boolean' or row['column_type'] == 'bigint':
+ schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'][0:2] == 'row' or row['column_type'][0:2] == 'map' or row['column_type'][0:2] == 'arr':
+ schema[table_name]['columns'].append(row['column_name'] + ' (row or map or array)')
+ else:
+ schema[table_name]['columns'].append(row['column_name'])
+
return schema.values()
diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py
index 61d8fc6598..04c9c8fefc 100644
--- a/redash/query_runner/jql.py
+++ b/redash/query_runner/jql.py
@@ -1,5 +1,4 @@
import json
-import requests
import re
from collections import OrderedDict
@@ -137,41 +136,15 @@ def get_dict_output_field_name(cls,field_name, member_name):
return None
-class JiraJQL(BaseQueryRunner):
+class JiraJQL(BaseHTTPQueryRunner):
noop_query = '{"queryType": "count"}'
default_doc_url = ("https://confluence.atlassian.com/jirasoftwarecloud/"
"advanced-searching-764478330.html")
-
- @classmethod
- def configuration_schema(cls):
- return {
- 'type': 'object',
- 'properties': {
- 'url': {
- 'type': 'string',
- 'title': 'JIRA URL'
- },
- 'username': {
- 'type': 'string',
- },
- 'password': {
- 'type': 'string'
- },
- "doc_url": {
- "type": "string",
- "title": "Documentation URL",
- "default": cls.default_doc_url
- },
- "toggle_table_string": {
- "type": "string",
- "title": "Toggle Table String",
- "default": "_v",
- "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
- }
- },
- 'required': ['url', 'username', 'password'],
- 'secret': ['password']
- }
+ response_error = "JIRA returned unexpected status code"
+ requires_authentication = True
+ url_title = 'JIRA URL'
+ username_title = 'Username'
+ password_title = 'Password'
@classmethod
def name(cls):
@@ -199,13 +172,9 @@ def run_query(self, query, user):
else:
query['maxResults'] = query.get('maxResults', 1000)
- response = requests.get(jql_url, params=query, auth=(self.configuration.get('username'), self.configuration.get('password')))
-
- if response.status_code == 401 or response.status_code == 403:
- return None, "Authentication error. Please check username/password."
-
- if response.status_code != 200:
- return None, "JIRA returned unexpected status code ({})".format(response.status_code)
+ response, error = self.get_response(jql_url, params=query)
+ if error is not None:
+ return None, error
data = response.json()
@@ -219,4 +188,3 @@ def run_query(self, query, user):
return None, "Query cancelled by user."
register(JiraJQL)
-
diff --git a/redash/query_runner/presto.py b/redash/query_runner/presto.py
index 509e5b9b56..ea94108394 100644
--- a/redash/query_runner/presto.py
+++ b/redash/query_runner/presto.py
@@ -1,4 +1,5 @@
import json
+from markupsafe import Markup, escape
from redash.utils import JSONEncoder
from redash.query_runner import *
@@ -84,9 +85,10 @@ def __init__(self, configuration):
def get_schema(self, get_stats=False):
schema = {}
query = """
- SELECT table_schema, table_name, column_name, data_type as column_type
+ SELECT table_schema, table_name, column_name, data_type as column_type, extra_info
FROM information_schema.columns
WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
+ ORDER BY 1, 5 DESC
"""
results, error = self.run_query(query, None)
@@ -102,7 +104,14 @@ def get_schema(self, get_stats=False):
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
- schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
+ if row['extra_info'] == 'partition key':
+ schema[table_name]['columns'].append('[P] ' + row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'] == 'integer' or row['column_type'] == 'varchar' or row['column_type'] == 'timestamp' or row['column_type'] == 'boolean' or row['column_type'] == 'bigint':
+ schema[table_name]['columns'].append(row['column_name'] + ' (' + row['column_type'] + ')')
+ elif row['column_type'][0:2] == 'row' or row['column_type'][0:2] == 'map' or row['column_type'][0:2] == 'arr':
+ schema[table_name]['columns'].append(row['column_name'] + ' (row or map or array)')
+ else:
+ schema[table_name]['columns'].append(row['column_name'])
return schema.values()
@@ -122,6 +131,9 @@ def run_query(self, query, user):
column_tuples = [(i[0], PRESTO_TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
columns = self.fetch_columns(column_tuples)
rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
+ for row in rows:
+ for field in row:
+ field = escape(field)
data = {'columns': columns, 'rows': rows}
json_data = json.dumps(data, cls=JSONEncoder)
error = None
diff --git a/redash/query_runner/url.py b/redash/query_runner/url.py
index c99289cca4..cfc1b03864 100644
--- a/redash/query_runner/url.py
+++ b/redash/query_runner/url.py
@@ -1,34 +1,10 @@
-import requests
-from redash.query_runner import BaseQueryRunner, register
+from redash.query_runner import BaseHTTPQueryRunner, register
-class Url(BaseQueryRunner):
+class Url(BaseHTTPQueryRunner):
default_doc_url = ("http://redash.readthedocs.io/en/latest/"
"datasources.html#url")
- @classmethod
- def configuration_schema(cls):
- return {
- 'type': 'object',
- 'properties': {
- 'url': {
- 'type': 'string',
- 'title': 'URL base path'
- },
- "doc_url": {
- "type": "string",
- "title": "Documentation URL",
- "default": cls.default_doc_url
- },
- "toggle_table_string": {
- "type": "string",
- "title": "Toggle Table String",
- "default": "_v",
- "info": "This string will be used to toggle visibility of tables in the schema browser when editing a query in order to remove non-useful tables from sight."
- }
- }
- }
-
@classmethod
def annotate_query(cls):
return False
@@ -40,7 +16,6 @@ def run_query(self, query, user):
base_url = self.configuration.get("url", None)
try:
- error = None
query = query.strip()
if base_url is not None and base_url != "":
@@ -52,20 +27,17 @@ def run_query(self, query, user):
url = base_url + query
- response = requests.get(url)
- response.raise_for_status()
- json_data = response.content.strip()
+ response, error = self.get_response(url)
+ if error is not None:
+ return None, error
- if not json_data:
- error = "Got empty response from '{}'.".format(url)
+ json_data = response.content.strip()
- return json_data, error
- except requests.RequestException as e:
- return None, str(e)
+ if json_data:
+ return json_data, None
+ else:
+ return None, "Got empty response from '{}'.".format(url)
except KeyboardInterrupt:
- error = "Query cancelled by user."
- json_data = None
-
- return json_data, error
+ return None, "Query cancelled by user."
register(Url)
diff --git a/redash/serializers.py b/redash/serializers.py
index 641c39ce43..44f43ae454 100644
--- a/redash/serializers.py
+++ b/redash/serializers.py
@@ -9,6 +9,7 @@
from flask_login import current_user
from redash import models
from redash.permissions import has_access, view_only
+from redash.handlers.query_results import run_query_sync
def public_widget(widget):
@@ -21,8 +22,15 @@ def public_widget(widget):
'created_at': widget.created_at
}
- if widget.visualization and widget.visualization.id:
- query_data = models.QueryResult.query.get(widget.visualization.query_rel.latest_query_data_id).to_dict()
+ if (widget.visualization and widget.visualization.id and
+ widget.visualization.query_rel is not None):
+ q = widget.visualization.query_rel
+ # make sure the widget's query has a latest_query_data_id that is
+ # not null so public dashboards work
+ if (q.latest_query_data_id is None):
+ run_query_sync(q.data_source, {}, q.query_text)
+
+ query_data = q.latest_query_data.to_dict()
res['visualization'] = {
'type': widget.visualization.type,
'name': widget.visualization.name,
@@ -31,9 +39,10 @@ def public_widget(widget):
'updated_at': widget.visualization.updated_at,
'created_at': widget.visualization.created_at,
'query': {
+ 'id': q.id,
'query': ' ', # workaround, as otherwise the query data won't be loaded.
- 'name': widget.visualization.query_rel.name,
- 'description': widget.visualization.query_rel.description,
+ 'name': q.name,
+ 'description': q.description,
'options': {},
'latest_query_data': query_data
}
@@ -91,6 +100,7 @@ def serialize_query(query, with_stats=False, with_visualizations=False, with_use
'query_hash': query.query_hash,
'schedule': query.schedule,
'schedule_until': query.schedule_until,
+ 'schedule_resultset_size': query.schedule_resultset_size,
'api_key': query.api_key,
'is_archived': query.is_archived,
'is_draft': query.is_draft,
diff --git a/redash/settings/__init__.py b/redash/settings/__init__.py
index 78d322e4f0..b7b88c64d2 100644
--- a/redash/settings/__init__.py
+++ b/redash/settings/__init__.py
@@ -1,7 +1,7 @@
import os
from funcy import distinct, remove
-from .helpers import parse_db_url, fix_assets_path, array_from_string, parse_boolean, int_or_none, set_from_string
+from .helpers import parse_db_url, fix_assets_path, array_from_string, parse_boolean, int_or_none, set_from_string, dict_from_string
def all_settings():
@@ -14,6 +14,7 @@ def all_settings():
return settings
+SESSION_COOKIE_SECURE = True
REDIS_URL = os.environ.get('REDASH_REDIS_URL', os.environ.get('REDIS_URL', "redis://localhost:6379/0"))
PROXIES_COUNT = int(os.environ.get('REDASH_PROXIES_COUNT', "1"))
@@ -82,6 +83,13 @@ def all_settings():
REMOTE_USER_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_REMOTE_USER_LOGIN_ENABLED", "false"))
REMOTE_USER_HEADER = os.environ.get("REDASH_REMOTE_USER_HEADER", "X-Forwarded-Remote-User")
+# When enabled this will match the given remote groups request header with a
+# configured list of allowed user groups using UNIX shell-style wildcards such
+# as * and ?.
+REMOTE_GROUPS_ENABLED = parse_boolean(os.environ.get("REDASH_REMOTE_GROUPS_ENABLED", "false"))
+REMOTE_GROUPS_HEADER = os.environ.get("REDASH_REMOTE_GROUPS_HEADER", "X-Forwarded-Remote-Groups")
+REMOTE_GROUPS_ALLOWED = set_from_string(os.environ.get("REDASH_REMOTE_GROUPS_ALLOWED", ""))
+
# If the organization setting auth_password_login_enabled is not false, then users will still be
# able to login through Redash instead of the LDAP server
LDAP_LOGIN_ENABLED = parse_boolean(os.environ.get('REDASH_LDAP_LOGIN_ENABLED', 'false'))
@@ -234,3 +242,14 @@ def all_settings():
# Allow Parameters in Embeds
# WARNING: With this option enabled, Redash reads query parameters from the request URL (risk of SQL injection!)
ALLOW_PARAMETERS_IN_EMBEDS = parse_boolean(os.environ.get("REDASH_ALLOW_PARAMETERS_IN_EMBEDS", "false"))
+
+# Allow for a map of custom queries to test data source performance and availability.
+# A sample map may look like:
+# {
+# "1": "select 1;",
+# "5": "select 1;"
+# }
+CUSTOM_HEALTH_QUERIES = dict_from_string(os.environ.get("REDASH_CUSTOM_HEALTH_QUERIES", ""))
+
+# Frequency of health query runs in minutes (12 hours by default)
+HEALTH_QUERIES_REFRESH_SCHEDULE = int(os.environ.get("REDASH_HEALTH_QUERIES_REFRESH_SCHEDULE", 720))
diff --git a/redash/settings/helpers.py b/redash/settings/helpers.py
index aa23e7125a..cfe69bd38d 100644
--- a/redash/settings/helpers.py
+++ b/redash/settings/helpers.py
@@ -31,7 +31,13 @@ def array_from_string(s):
if "" in array:
array.remove("")
- return array
+ return [item.strip() for item in array]
+
+def dict_from_string(s):
+ try:
+ return json.loads(s)
+ except ValueError:
+ return {}
def set_from_string(s):
diff --git a/redash/tasks/__init__.py b/redash/tasks/__init__.py
index f242e4c516..05d51aae50 100644
--- a/redash/tasks/__init__.py
+++ b/redash/tasks/__init__.py
@@ -1,3 +1,4 @@
from .general import record_event, version_check, send_mail
+from .health import health_status
from .queries import QueryTask, refresh_queries, refresh_schemas, cleanup_tasks, cleanup_query_results, execute_query
from .alerts import check_alerts_for_query
\ No newline at end of file
diff --git a/redash/tasks/health.py b/redash/tasks/health.py
new file mode 100644
index 0000000000..2502b32ddf
--- /dev/null
+++ b/redash/tasks/health.py
@@ -0,0 +1,59 @@
+import json
+import time
+from random import randint
+
+from celery.utils.log import get_task_logger
+from redash import models, redis_connection, settings, statsd_client
+from redash.worker import celery
+from redash.utils import parse_human_time
+
+logger = get_task_logger(__name__)
+
+
+def update_health_status(data_source_id, data_source_name, query_text, data):
+ key = "data_sources:health"
+
+ cache = json.loads(redis_connection.get(key) or '{}')
+ if data_source_id not in cache:
+ cache[data_source_id] = {
+ "metadata": { "name": data_source_name },
+ "queries": {}
+ }
+ cache[data_source_id]["queries"][query_text] = data
+
+ cache[data_source_id]["status"] = "SUCCESS"
+ for query_status in cache[data_source_id]["queries"].values():
+ if query_status["status"] == "FAIL":
+ cache[data_source_id]["status"] = "FAIL"
+ break
+
+ redis_connection.set(key, json.dumps(cache))
+
+@celery.task(name="redash.tasks.health_status", time_limit=90, soft_time_limit=60)
+def health_status():
+ for ds in models.DataSource.query:
+ logger.info(u"task=health_status state=start ds_id=%s", ds.id)
+
+ runtime = None
+ query_text = ds.query_runner.noop_query
+ custom_queries = settings.CUSTOM_HEALTH_QUERIES
+ ds_id = str(ds.id)
+
+ if custom_queries and ds_id in custom_queries:
+ query_text = custom_queries[ds_id]
+
+ try:
+ start_time = time.time()
+ ds.query_runner.test_connection(query_text)
+ runtime = time.time() - start_time
+ except Exception as e:
+ logger.warning(u"Failed health check for the data source: %s", ds.name, exc_info=1)
+ statsd_client.incr('health_status.error')
+ logger.info(u"task=health_status state=error ds_id=%s runtime=%.2f", ds.id, time.time() - start_time)
+
+ update_health_status(ds_id, ds.name, query_text, {
+ "status": "SUCCESS" if runtime is not None else "FAIL",
+ "last_run": start_time,
+ "last_run_human": str(parse_human_time(str(start_time))),
+ "runtime": runtime
+ })
diff --git a/redash/tasks/queries.py b/redash/tasks/queries.py
index bb64fdd63d..77c01c15aa 100644
--- a/redash/tasks/queries.py
+++ b/redash/tasks/queries.py
@@ -355,6 +355,7 @@ def cleanup_query_results():
deleted_count = models.QueryResult.query.filter(
models.QueryResult.id.in_(unused_query_results.subquery())
).delete(synchronize_session=False)
+ deleted_count += models.Query.delete_stale_resultsets()
models.db.session.commit()
logger.info("Deleted %d unused query results.", deleted_count)
@@ -466,6 +467,8 @@ def run(self):
self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
self.scheduled_query.schedule_failures += 1
models.db.session.add(self.scheduled_query)
+ models.db.session.commit()
+ raise result
else:
if (self.scheduled_query and self.scheduled_query.schedule_failures > 0):
self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
@@ -482,8 +485,8 @@ def run(self):
self._log_progress('finished')
result = query_result.id
- models.db.session.commit()
- return result
+ models.db.session.commit()
+ return result
def _annotate_query(self, query_runner):
if query_runner.annotate_query():
diff --git a/redash/worker.py b/redash/worker.py
index 629180b1f1..668da00735 100644
--- a/redash/worker.py
+++ b/redash/worker.py
@@ -16,6 +16,10 @@
include='redash.tasks')
celery_schedule = {
+ 'health_status': {
+ 'task': 'redash.tasks.health_status',
+ 'schedule': timedelta(minutes=settings.HEALTH_QUERIES_REFRESH_SCHEDULE)
+ },
'refresh_queries': {
'task': 'redash.tasks.refresh_queries',
'schedule': timedelta(seconds=30)
diff --git a/requirements.txt b/requirements.txt
index e7c91899ad..1bc001f037 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,7 +21,7 @@ blinker==1.3
psycopg2==2.7.3.2
python-dateutil==2.4.2
pytz==2016.7
-PyYAML==3.12
+PyYAML==3.13
redis==2.10.5
requests==2.11.1
six==1.10.0
@@ -46,7 +46,7 @@ pystache==0.5.4
parsedatetime==2.1
cryptography==2.0.2
simplejson==3.10.0
-ua-parser==0.7.3
+ua-parser==0.7.3
user-agents==1.1.0
python-geoip-geolite2==2015.303
chromelogger==0.4.3
@@ -54,3 +54,4 @@ disposable-email-domains
# Uncomment the requirement for ldap3 if using ldap.
# It is not included by default because of the GPL license conflict.
# ldap3==2.2.4
+redash-stmo>=2018.4.0
diff --git a/tests/factories.py b/tests/factories.py
index 6d6f77e628..a051719d26 100644
--- a/tests/factories.py
+++ b/tests/factories.py
@@ -109,7 +109,9 @@ def __call__(self):
query_hash=gen_query_hash('SELECT 1'),
data_source=data_source_factory.create,
org_id=1)
-
+query_resultset_factory = ModelFactory(redash.models.QueryResultSet,
+ query_rel=query_factory.create,
+ result=query_result_factory.create)
visualization_factory = ModelFactory(redash.models.Visualization,
type='CHART',
query_rel=query_factory.create,
@@ -295,6 +297,9 @@ def create_query_result(self, **kwargs):
return query_result_factory.create(**args)
+ def create_query_resultset(self, **kwargs):
+ return query_resultset_factory.create(**kwargs)
+
def create_visualization(self, **kwargs):
args = {
'query_rel': self.create_query()
diff --git a/tests/handlers/test_dashboards.py b/tests/handlers/test_dashboards.py
index 0cd38a5fea..d2a7007c56 100644
--- a/tests/handlers/test_dashboards.py
+++ b/tests/handlers/test_dashboards.py
@@ -5,6 +5,18 @@
from redash.serializers import serialize_dashboard
+class TestRecentDashboardResourceGet(BaseTestCase):
+ def test_get_recent_dashboard_list_does_not_include_deleted(self):
+ d1 = self.factory.create_dashboard()
+ expected = d1.to_dict()
+ d2 = self.factory.create_dashboard() # this shouldn't be required but test fails without it
+ rv = self.make_request('post', '/api/dashboards/{0}'.format(d1.id),
+ data={'name': 'New Name', 'layout': '[]', 'is_archived': True})
+ rvrecent = self.make_request('get', '/api/dashboards/recent')
+ self.assertEquals(rvrecent.status_code, 200)
+ actual = json.loads(rvrecent.data)
+ self.assertNotIn(expected['id'], actual)
+
class TestDashboardListResource(BaseTestCase):
def test_create_new_dashboard(self):
dashboard_name = 'Test Dashboard'
@@ -182,3 +194,31 @@ def test_requires_admin_or_owner(self):
res = self.make_request('delete', '/api/dashboards/{}/share'.format(dashboard.id), user=user)
self.assertEqual(res.status_code, 200)
+
+class TestDashboardSearchResourceGet(BaseTestCase):
+ def create_dashboard_sequence(self):
+ d1 = self.factory.create_dashboard()
+ new_name = 'Analytics'
+ rv1 = self.make_request('post', '/api/dashboards/{0}'.format(d1.id),
+ data={'name': new_name, 'layout': '[]', 'is_draft': False})
+ d2 = self.factory.create_dashboard()
+ rv2 = self.make_request('post', '/api/dashboards/{0}'.format(d2.id),
+ data={'name': 'Metrics', 'layout': '[]', 'is_draft': True})
+ user = self.factory.create_user()
+ return d1, d2, user
+
+ def test_get_dashboard_search_results_does_not_contain_deleted(self):
+ d1, d2, user = self.create_dashboard_sequence()
+ res = self.make_request('delete', '/api/dashboards/{}/share'.format(d2.id))
+ dash_search_list = self.make_request('get','/api/dashboards/search?q=Metrics')
+ dash_search_list_json = json.loads(dash_search_list.data)
+ self.assertNotIn(d2.id, dash_search_list_json)
+
+ def test_get_dashboard_search_results_obeys_draft_flag(self):
+ d1, d2, user = self.create_dashboard_sequence()
+ dash_search_list = self.make_request('get','/api/dashboards/search?q=Metrics&test=True&user_id={}'.format(user.id))
+ dash_search_list_json = json.loads(dash_search_list.data)
+ self.assertNotIn(d2.id, dash_search_list_json)
+ #self.assertIn(d1.id, dash_search_list_json)
+
+
diff --git a/tests/handlers/test_embed.py b/tests/handlers/test_embed.py
index 18f119d786..905a6f8672 100644
--- a/tests/handlers/test_embed.py
+++ b/tests/handlers/test_embed.py
@@ -1,5 +1,8 @@
+import mock
+
from tests import BaseTestCase
from redash.models import db
+from redash.query_runner.pg import PostgreSQL
class TestEmbedVisualization(BaseTestCase):
@@ -97,6 +100,15 @@ def test_inactive_token(self):
res = self.make_request('get', '/api/dashboards/public/{}'.format(api_key.api_key), user=False, is_json=False)
self.assertEqual(res.status_code, 404)
+ def test_dashboard_widgets(self):
+ dashboard = self.factory.create_dashboard()
+ w1 = self.factory.create_widget(dashboard=dashboard)
+ w2 = self.factory.create_widget(dashboard=dashboard, visualization=None, text="a text box")
+ api_key = self.factory.create_api_key(object=dashboard)
+ with mock.patch.object(PostgreSQL, "run_query") as qr:
+ qr.return_value = ("[1, 2]", None)
+ res = self.make_request('get', '/api/dashboards/public/{}'.format(api_key.api_key), user=False, is_json=False)
+ self.assertEqual(res.status_code, 200)
# Not relevant for now, as tokens in api_keys table are only created for dashboards. Once this changes, we should
# add this test.
# def test_token_doesnt_belong_to_dashboard(self):
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py
index bbc8464917..b120eb14d4 100644
--- a/tests/handlers/test_queries.py
+++ b/tests/handlers/test_queries.py
@@ -1,3 +1,5 @@
+import json
+
from tests import BaseTestCase
from redash import models
from redash.models import db
@@ -256,3 +258,81 @@ def test_get(self):
rv2 = self.make_request('get', '/api/changes/' + str(ch2.id))
self.assertEqual(rv2.status_code, 200)
self.assertEqual(rv2.json['change']['name']['current'], 'version B')
+
+
+class AggregateResultsTests(BaseTestCase):
+ def test_aggregate(self):
+ qtxt = "SELECT x FROM mytable;"
+ q = self.factory.create_query(query_text=qtxt, schedule_resultset_size=3)
+ qr0 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'eve', 'color': 'grue'},
+ {'name': 'mallory', 'color': 'bleen'}]}))
+ qr1 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'}]}))
+ qr2 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'}]}))
+ qr3 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'dave', 'color': 'yellow'},
+ {'name': 'carol', 'color': 'taupe'}]}))
+ for qr in (qr0, qr1, qr2, qr3):
+ self.factory.create_query_resultset(query_rel=q, result=qr)
+ rv = self.make_request('get', '/api/queries/{}/resultset'.format(q.id))
+ self.assertEqual(rv.status_code, 200)
+ self.assertEqual(rv.json['query_result']['data'],
+ {'columns': ['name', 'color'],
+ 'rows': [
+ {'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'},
+ {'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'},
+ {'name': 'dave', 'color': 'yellow'},
+ {'name': 'carol', 'color': 'taupe'}
+ ]})
+
+ def test_underfilled_aggregate(self):
+ qtxt = "SELECT x FROM mytable;"
+ q = self.factory.create_query(query_text=qtxt,
+ schedule_resultset_size=3)
+ qr1 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'}]}))
+ qr2 = self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'}]}))
+ for qr in (qr1, qr2):
+ self.factory.create_query_resultset(query_rel=q, result=qr)
+ rv = self.make_request('get', '/api/queries/{}/resultset'.format(q.id))
+ self.assertEqual(rv.status_code, 200)
+ self.assertEqual(rv.json['query_result']['data'],
+ {'columns': ['name', 'color'],
+ 'rows': [
+ {'name': 'bob', 'color': 'green'},
+ {'name': 'fred', 'color': 'blue'},
+ {'name': 'alice', 'color': 'red'},
+ {'name': 'eddie', 'color': 'orange'}
+ ]})
+
+ def test_no_aggregate(self):
+ qtxt = "SELECT x FROM mytable;"
+ q = self.factory.create_query(query_text=qtxt)
+ self.factory.create_query_result(
+ query_text=qtxt,
+ data=json.dumps({'columns': ['name', 'color'],
+ 'rows': [{'name': 'eve', 'color': 'grue'},
+ {'name': 'mallory', 'color': 'bleen'}]}))
+ rv = self.make_request('get', '/api/queries/{}/resultset'.format(q.id))
+ self.assertEqual(rv.status_code, 404)
diff --git a/tests/handlers/test_widgets.py b/tests/handlers/test_widgets.py
index 702ef6f828..cb89caab47 100644
--- a/tests/handlers/test_widgets.py
+++ b/tests/handlers/test_widgets.py
@@ -64,3 +64,15 @@ def test_delete_widget(self):
self.assertEquals(rv.status_code, 200)
dashboard = models.Dashboard.get_by_slug_and_org(widget.dashboard.slug, widget.dashboard.org)
self.assertEquals(dashboard.widgets.count(), 0)
+
+ def test_updates_textbox_widget(self):
+ widget = self.factory.create_widget()
+
+ rv = self.make_request('post', '/api/widgets/{0}'.format(widget.id), data={'width':2,'text':'sing and shine on', 'options': {}})
+
+ self.assertEquals(rv.status_code, 200)
+ dashboard = models.Dashboard.get_by_slug_and_org(widget.dashboard.slug, widget.dashboard.org)
+ self.assertEquals(dashboard.widgets.count(), 1)
+ self.assertEquals(dashboard.layout, '[]')
+
+
diff --git a/tests/query_runner/test_http.py b/tests/query_runner/test_http.py
new file mode 100644
index 0000000000..e4d88c24a8
--- /dev/null
+++ b/tests/query_runner/test_http.py
@@ -0,0 +1,136 @@
+import mock
+from unittest import TestCase
+
+import requests
+from redash.query_runner import BaseHTTPQueryRunner
+
+
+class RequiresAuthQueryRunner(BaseHTTPQueryRunner):
+ requires_authentication = True
+
+
+class TestBaseHTTPQueryRunner(TestCase):
+
+ def test_requires_authentication_default(self):
+ self.assertFalse(BaseHTTPQueryRunner.requires_authentication)
+ schema = BaseHTTPQueryRunner.configuration_schema()
+ self.assertNotIn('username', schema['required'])
+ self.assertNotIn('password', schema['required'])
+
+ def test_requires_authentication_true(self):
+ schema = RequiresAuthQueryRunner.configuration_schema()
+ self.assertIn('username', schema['required'])
+ self.assertIn('password', schema['required'])
+
+ def test_get_auth_with_values(self):
+ query_runner = BaseHTTPQueryRunner({
+ 'username': 'username',
+ 'password': 'password'
+ })
+ self.assertEqual(query_runner.get_auth(), ('username', 'password'))
+
+ def test_get_auth_empty(self):
+ query_runner = BaseHTTPQueryRunner({})
+ self.assertIsNone(query_runner.get_auth())
+
+ def test_get_auth_empty_requires_authentication(self):
+ query_runner = RequiresAuthQueryRunner({})
+ self.assertRaisesRegexp(
+ ValueError,
+ "Username and Password required",
+ query_runner.get_auth
+ )
+
+ @mock.patch('requests.get')
+ def test_get_response_success(self, mock_get):
+ mock_response = mock.Mock()
+ mock_response.status_code = 200
+ mock_response.text = "Success"
+ mock_get.return_value = mock_response
+
+ url = 'https://example.com/'
+ query_runner = BaseHTTPQueryRunner({})
+ response, error = query_runner.get_response(url)
+ mock_get.assert_called_once_with(url, auth=None)
+ self.assertEqual(response.status_code, 200)
+ self.assertIsNone(error)
+
+ @mock.patch('requests.get')
+ def test_get_response_success_custom_auth(self, mock_get):
+ mock_response = mock.Mock()
+ mock_response.status_code = 200
+ mock_response.text = "Success"
+ mock_get.return_value = mock_response
+
+ url = 'https://example.com/'
+ query_runner = BaseHTTPQueryRunner({})
+ auth = ('username', 'password')
+ response, error = query_runner.get_response(url, auth=auth)
+ mock_get.assert_called_once_with(url, auth=auth)
+ self.assertEqual(response.status_code, 200)
+ self.assertIsNone(error)
+
+ @mock.patch('requests.get')
+ def test_get_response_failure(self, mock_get):
+ mock_response = mock.Mock()
+ mock_response.status_code = 301
+ mock_response.text = "Redirect"
+ mock_get.return_value = mock_response
+
+ url = 'https://example.com/'
+ query_runner = BaseHTTPQueryRunner({})
+ response, error = query_runner.get_response(url)
+ mock_get.assert_called_once_with(url, auth=None)
+ self.assertIn(query_runner.response_error, error)
+
+ @mock.patch('requests.get')
+ def test_get_response_httperror_exception(self, mock_get):
+ mock_response = mock.Mock()
+ mock_response.status_code = 500
+ mock_response.text = "Server Error"
+ http_error = requests.HTTPError()
+ mock_response.raise_for_status.side_effect = http_error
+ mock_get.return_value = mock_response
+
+ url = 'https://example.com/'
+ query_runner = BaseHTTPQueryRunner({})
+ response, error = query_runner.get_response(url)
+ mock_get.assert_called_once_with(url, auth=None)
+ self.assertIsNotNone(error)
+ self.assertIn("Failed to execute query", error)
+
+ @mock.patch('requests.get')
+ def test_get_response_requests_exception(self, mock_get):
+ mock_response = mock.Mock()
+ mock_response.status_code = 500
+ mock_response.text = "Server Error"
+ exception_message = "Some requests exception"
+ requests_exception = requests.RequestException(exception_message)
+ mock_response.raise_for_status.side_effect = requests_exception
+ mock_get.return_value = mock_response
+
+ url = 'https://example.com/'
+ query_runner = BaseHTTPQueryRunner({})
+ response, error = query_runner.get_response(url)
+ mock_get.assert_called_once_with(url, auth=None)
+ self.assertIsNotNone(error)
+ self.assertEqual(exception_message, error)
+
+ @mock.patch('requests.get')
+ def test_get_response_generic_exception(self, mock_get):
+ mock_response = mock.Mock()
+ mock_response.status_code = 500
+ mock_response.text = "Server Error"
+ exception_message = "Some generic exception"
+ exception = ValueError(exception_message)
+ mock_response.raise_for_status.side_effect = exception
+ mock_get.return_value = mock_response
+
+ url = 'https://example.com/'
+ query_runner = BaseHTTPQueryRunner({})
+ self.assertRaisesRegexp(
+ ValueError,
+ exception_message,
+ query_runner.get_response,
+ url
+ )
diff --git a/tests/tasks/test_health.py b/tests/tasks/test_health.py
new file mode 100644
index 0000000000..37e4f9ea7d
--- /dev/null
+++ b/tests/tasks/test_health.py
@@ -0,0 +1,136 @@
+import json
+import mock
+from tests import BaseTestCase
+
+from redash import redis_connection
+from redash.tasks.health import update_health_status, health_status
+
+
+class TestHealthStatus(BaseTestCase):
+ def setUp(self):
+ super(TestHealthStatus, self).setUp()
+ self.patched_custom_queries = self._setup_mock('redash.tasks.health.settings')
+ self.patched_updated_health_status = self._setup_mock('redash.tasks.health.update_health_status')
+ self.patched_run_query = self._setup_mock('redash.query_runner.pg.PostgreSQL.run_query')
+
+ self.patched_run_query.return_value = ("some_data", None)
+ self.patched_custom_queries.CUSTOM_HEALTH_QUERIES = ""
+
+ def _setup_mock(self, function_to_patch):
+ patcher = mock.patch(function_to_patch)
+ patched_function = patcher.start()
+ self.addCleanup(patcher.stop)
+ return patched_function
+
+ def test_update_health_status_sets_correct_keys(self):
+ current_health = redis_connection.get('data_sources:health')
+ self.assertEqual(None, current_health)
+
+ DATA_SOURCE = self.factory.create_data_source()
+ QUERY_SUCCESS = "SELECT 1"
+ QUERY_FAIL = "SELECT meep"
+ SOME_DATA_FAIL = {"a": "b", "foo": "bar", "status": "FAIL"}
+ SOME_DATA_SUCCESS = {"a": "b", "foo": "bar", "status": "SUCCESS"}
+ update_health_status(str(DATA_SOURCE.id), DATA_SOURCE.name, QUERY_FAIL, SOME_DATA_FAIL)
+ update_health_status(str(DATA_SOURCE.id), DATA_SOURCE.name, QUERY_SUCCESS, SOME_DATA_SUCCESS)
+
+ '''
+ The expected format of the cached health data is:
+ {
+ "": {
+ "metadata": "",
+ "queries": {
+ "": {...},
+ "": {...},
+ "": {...},
+ ...
+ }
+ },
+ ...
+ }
+ '''
+ current_health = json.loads(redis_connection.get('data_sources:health'))
+
+ # There is 1 data source.
+ self.assertEqual(1, len(current_health.keys()))
+ self.assertEqual(DATA_SOURCE.id, int(current_health.keys()[0]))
+
+ # The data source has "metadata", "queries" and "status" keys.
+ ds_id = str(DATA_SOURCE.id)
+ self.assertEqual(3, len(current_health[ds_id].keys()))
+ self.assertTrue("metadata" in current_health[ds_id].keys())
+ self.assertTrue("queries" in current_health[ds_id].keys())
+ self.assertTrue("status" in current_health[ds_id].keys())
+
+ # There are two queries with correct data
+ self.assertEqual(2, len(current_health[ds_id]["queries"]))
+ self.assertTrue(QUERY_SUCCESS in current_health[ds_id]["queries"].keys())
+ self.assertTrue(QUERY_FAIL in current_health[ds_id]["queries"].keys())
+ self.assertEqual(SOME_DATA_FAIL, current_health[ds_id]["queries"][QUERY_FAIL])
+ self.assertEqual(SOME_DATA_SUCCESS, current_health[ds_id]["queries"][QUERY_SUCCESS])
+ self.assertEqual(SOME_DATA_FAIL["status"], current_health[ds_id]["status"])
+
+ def test_health_status_success(self):
+ data_sources = []
+ for i in range(5):
+ data_sources.append(self.factory.create_data_source())
+
+ health_status()
+
+ # Status is updated for each of the 5 data sources
+ self.assertEqual(self.patched_updated_health_status.call_count, 5)
+
+ # The data source name and id is correctly passed in the last call of update_health_status()
+ args, kwargs = self.patched_updated_health_status.call_args
+ self.assertEqual(str(data_sources[-1].id), args[0])
+ self.assertEqual(data_sources[-1].name, args[1])
+
+ # All expected status keys are available.
+ EXPECTED_KEYS = ["status", "last_run", "last_run_human", "runtime"]
+ NEW_STATUS = args[3]
+ new_status_keys = set(NEW_STATUS.keys())
+ self.assertEqual(set(EXPECTED_KEYS), new_status_keys)
+
+ self.assertEqual("SUCCESS", NEW_STATUS["status"])
+ for key in EXPECTED_KEYS[1:]:
+ self.assertIsNotNone(NEW_STATUS[key])
+
+ def test_health_status_run_query_throws_exception(self):
+ data_source = self.factory.create_data_source()
+
+ def exception_raiser(*args, **kwargs):
+ raise Exception
+
+ self.patched_run_query.side_effect = exception_raiser
+ health_status()
+
+ # Status is updated for the one data source
+ self.assertEqual(self.patched_updated_health_status.call_count, 1)
+
+ # The data source name is correctly passed in the last call of update_health_status()
+ args, kwargs = self.patched_updated_health_status.call_args
+ self.assertEqual(str(data_source.id), args[0])
+ self.assertEqual(data_source.name, args[1])
+ self.assertEqual(data_source.query_runner.noop_query, args[2])
+
+ # All expected status keys are available.
+ EXPECTED_KEYS = ['status', 'last_run', 'last_run_human', 'runtime']
+ NEW_STATUS = args[3]
+ new_status_keys = set(NEW_STATUS.keys())
+ self.assertEqual(set(EXPECTED_KEYS), new_status_keys)
+
+ self.assertEqual('FAIL', NEW_STATUS['status'])
+ self.assertIsNotNone(NEW_STATUS['last_run'])
+ self.assertIsNotNone(NEW_STATUS['last_run_human'])
+ self.assertIsNone(NEW_STATUS['runtime'])
+
+ def test_health_status_custom_query(self):
+ CUSTOM_QUERY = "select * from table"
+ data_source = self.factory.create_data_source()
+ self.patched_custom_queries.CUSTOM_HEALTH_QUERIES = {"1": CUSTOM_QUERY}
+
+ health_status()
+
+ args, kwargs = self.patched_updated_health_status.call_args
+ self.assertNotEqual(data_source.query_runner.noop_query, args[2])
+ self.assertEqual(CUSTOM_QUERY, args[2])
diff --git a/tests/tasks/test_queries.py b/tests/tasks/test_queries.py
index 90bca6cf32..ef50519b6c 100644
--- a/tests/tasks/test_queries.py
+++ b/tests/tasks/test_queries.py
@@ -7,7 +7,8 @@
from tests import BaseTestCase
from redash import redis_connection, models
from redash.query_runner.pg import PostgreSQL
-from redash.tasks.queries import QueryTaskTracker, enqueue_query, execute_query
+from redash.tasks.queries import (QueryExecutionError, QueryTaskTracker,
+ enqueue_query, execute_query)
class TestPrune(TestCase):
@@ -113,11 +114,15 @@ def test_failure_scheduled(self):
{'routing_key': 'test'})
q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
- qr.exception = ValueError("broken")
- execute_query("SELECT 1, 2", self.factory.data_source.id, {}, scheduled_query_id=q.id)
+ qr.side_effect = ValueError("broken")
+ with self.assertRaises(QueryExecutionError):
+ execute_query("SELECT 1, 2", self.factory.data_source.id, {},
+ scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 1)
- execute_query("SELECT 1, 2", self.factory.data_source.id, {}, scheduled_query_id=q.id)
+ with self.assertRaises(QueryExecutionError):
+ execute_query("SELECT 1, 2", self.factory.data_source.id, {},
+ scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 2)
@@ -129,10 +134,11 @@ def test_success_after_failure(self):
{'routing_key': 'test'})
q = self.factory.create_query(query_text="SELECT 1, 2", schedule=300)
with cm, mock.patch.object(PostgreSQL, "run_query") as qr:
- qr.exception = ValueError("broken")
- execute_query("SELECT 1, 2",
- self.factory.data_source.id, {},
- scheduled_query_id=q.id)
+ qr.side_effect = ValueError("broken")
+ with self.assertRaises(QueryExecutionError):
+ execute_query("SELECT 1, 2",
+ self.factory.data_source.id, {},
+ scheduled_query_id=q.id)
q = models.Query.get_by_id(q.id)
self.assertEqual(q.schedule_failures, 1)
diff --git a/tests/test_models.py b/tests/test_models.py
index d719eb3b42..667aa2649d 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -335,22 +335,74 @@ def test_get_latest_returns_the_last_cached_result_for_negative_ttl(self):
class TestUnusedQueryResults(BaseTestCase):
def test_returns_only_unused_query_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
- qr = self.factory.create_query_result()
- query = self.factory.create_query(latest_query_data=qr)
+ qt = "SELECT 1"
+ qr = self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
+ query = self.factory.create_query(query_text=qt, latest_query_data=qr)
+ unused_qr = self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
db.session.flush()
- unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
self.assertIn((unused_qr.id,), models.QueryResult.unused())
self.assertNotIn((qr.id,), list(models.QueryResult.unused()))
def test_returns_only_over_a_week_old_results(self):
two_weeks_ago = utcnow() - datetime.timedelta(days=14)
- unused_qr = self.factory.create_query_result(retrieved_at=two_weeks_ago)
+ qt = "SELECT 1"
+ unused_qr = self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
db.session.flush()
- new_unused_qr = self.factory.create_query_result()
-
+ new_unused_qr = self.factory.create_query_result(query_text=qt)
self.assertIn((unused_qr.id,), models.QueryResult.unused())
self.assertNotIn((new_unused_qr.id,), models.QueryResult.unused())
+ def test_doesnt_return_live_incremental_results(self):
+ two_weeks_ago = utcnow() - datetime.timedelta(days=14)
+ qt = "SELECT 1"
+ qrs = [self.factory.create_query_result(query_text=qt, retrieved_at=two_weeks_ago)
+ for _ in range(5)]
+ q = self.factory.create_query(query_text=qt, latest_query_data=qrs[0],
+ schedule_resultset_size=3)
+ for qr in qrs:
+ self.factory.create_query_resultset(query_rel=q, result=qr)
+ db.session.flush()
+ self.assertEqual([], list(models.QueryResult.unused()))
+
+ def test_deletes_stale_resultsets(self):
+ qt = "SELECT 17"
+ query = self.factory.create_query(query_text=qt,
+ schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt)
+ self.factory.create_query_resultset(query_rel=query, result=r)
+ qt2 = "SELECT 100"
+ query2 = self.factory.create_query(query_text=qt2, schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt2)
+ self.factory.create_query_resultset(query_rel=query2, result=r)
+ db.session.flush()
+ self.assertEqual(models.QueryResultSet.query.count(), 20)
+ self.assertEqual(models.Query.delete_stale_resultsets(), 10)
+ self.assertEqual(models.QueryResultSet.query.count(), 10)
+
+ def test_deletes_stale_resultsets_with_dupe_queries(self):
+ qt = "SELECT 17"
+ query = self.factory.create_query(query_text=qt,
+ schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt)
+ self.factory.create_query_resultset(query_rel=query, result=r)
+ query2 = self.factory.create_query(query_text=qt,
+ schedule_resultset_size=3)
+ for _ in range(10):
+ self.factory.create_query_result(query_text=qt)
+ self.factory.create_query_resultset(query_rel=query2)
+ qt2 = "SELECT 100"
+ query3 = self.factory.create_query(query_text=qt2, schedule_resultset_size=5)
+ for _ in range(10):
+ r = self.factory.create_query_result(query_text=qt2)
+ self.factory.create_query_resultset(query_rel=query3, result=r)
+ db.session.flush()
+ self.assertEqual(models.QueryResultSet.query.count(), 30)
+ self.assertEqual(models.Query.delete_stale_resultsets(), 10)
+ self.assertEqual(models.QueryResultSet.query.count(), 13)
+
class TestQueryAll(BaseTestCase):
def test_returns_only_queries_in_given_groups(self):