Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified .DS_Store
Binary file not shown.
6 changes: 3 additions & 3 deletions explore-assistant-examples/.env
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
##Update the variables in this environment file to automate the bash scripts for loading & updating the examples

PROJECT_ID="PROJECT_ID" ##Required. The Google Cloud project ID where your BigQuery dataset resides.
DATASET_ID="DATASET_ID" ##The ID of the BigQuery dataset. Defaults to explore_assistant.
EXPLORE_ID="MODEL:EXPLORE_ID" ##Required. A unique identifier for the dataset rows related to a specific use case or query (used in deletion and insertion).
PROJECT_ID=seraphic-ripsaw-360618
DATASET_ID=explore_assistant
EXPLORE_ID=nabc:spins_nlp
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"#Convert CSV to JSON\n",
"import csv\n",
"import json\n",
"\n",
"\n",
"def csv_to_json(csv_file, json_file):\n",
" \"\"\"Converts a CSV file to a JSON file.\n",
"\n",
"\n",
" Args:\n",
" csv_file: The path to the CSV file.\n",
" json_file: The path to the output JSON file.\n",
" \"\"\"\n",
"\n",
"\n",
" data = []\n",
" with open(csv_file, 'r') as csvfile:\n",
" csvreader = csv.DictReader(csvfile)\n",
" for row in csvreader:\n",
" data.append(dict(row))\n",
"\n",
"\n",
" with open(json_file, 'w') as jsonfile:\n",
" json.dump(data, jsonfile, indent=4)\n",
"\n",
"\n",
"\n",
"\n",
"# Example usage\n",
"csv_file = 'DMi EA Prompts - Explore Assistant Order Details - Cleansed.csv'\n",
"json_file = 'dmi_examples.json'\n",
"csv_to_json(csv_file, json_file)\n",
"print(f\"CSV converted to JSON: {json_file}\")"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
56 changes: 56 additions & 0 deletions explore-assistant-examples/convert_examples.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"#Convert CSV to JSON\n",
"import csv\n",
"import json\n",
"\n",
"\n",
"def csv_to_json(csv_file, json_file):\n",
" \"\"\"Converts a CSV file to a JSON file.\n",
"\n",
"\n",
" Args:\n",
" csv_file: The path to the CSV file.\n",
" json_file: The path to the output JSON file.\n",
" \"\"\"\n",
"\n",
"\n",
" data = []\n",
" with open(csv_file, 'r') as csvfile:\n",
" csvreader = csv.DictReader(csvfile)\n",
" for row in csvreader:\n",
" data.append(dict(row))\n",
"\n",
"\n",
" with open(json_file, 'w') as jsonfile:\n",
" json.dump(data, jsonfile, indent=4)\n",
"\n",
"\n",
"\n",
"\n",
"# Example usage\n",
"csv_file = '/Users/kalib/Downloads/NABC Examples - examples_cleansed.csv'\n",
"json_file = 'nabc_examples.json'\n",
"csv_to_json(csv_file, json_file)\n",
"print(f\"CSV converted to JSON: {json_file}\")"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
2 changes: 1 addition & 1 deletion explore-assistant-examples/load_examples.sh
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

source .env
TABLE_ID="explore_assistant_examples" ##The ID of the BigQuery table where the data will be inserted. Set to explore_assistant_examples.
JSON_FILE="examples.json" ##The path to the JSON file containing the data to be loaded. Set to examples.json.
JSON_FILE="nabc_examples.json" ##The path to the JSON file containing the data to be loaded. Set to examples.json.

python load_examples.py \
--project_id $PROJECT_ID \
Expand Down
1,022 changes: 1,022 additions & 0 deletions explore-assistant-examples/nabc_examples.json

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions explore-assistant-examples/samples.json
Original file line number Diff line number Diff line change
@@ -1,14 +1,14 @@
[
{
"category": "Cohorting",
"prompt": "Count of Users by first purchase date"
"category": "Performance",
"prompt": "What parent companies sell the most Fresh Blueberries by units?"
},
{
"category": "Audience Building",
"prompt":"Users who have purchased more than 100 dollars worth of Calvin Klein products and have purchased in the last 30 days"
"category": "Volume Trends",
"prompt":"What is the trend in sales volume for blueberries over the last year?"
},
{
"category": "Period Comparison",
"prompt": "Total revenue by category this year compared to last year in a line chart with year pivoted"
"category": "Seasonality",
"prompt": "Do blueberry sales volumes peak during specific seasons?"
}
]
Empty file modified explore-assistant-examples/update_refinements.sh
100644 → 100755
Empty file.
Empty file modified explore-assistant-examples/update_samples.sh
100644 → 100755
Empty file.
Binary file modified explore-assistant-extension/.DS_Store
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
export const looker_filters_interval_tf:string= `

## Intervals

Interval options
Expand Down Expand Up @@ -134,7 +132,4 @@ This will split up each second into intervals with the specified number of milli
2014-09-01 01:00:00.500
2014-09-01 01:00:00.750
To give an example, a row with a time of 2014-09-01 01:00:00.333 would have a millisecond250 of 2014-09-01 01:00:00.250.
`


export default looker_filters_interval_tf;
|
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
# Expanded URL generation

JSON Payload Fields:

fields: fields=view.field_1,view.field_2,view.count
This parameter specifies the list of fields to be included in the results. In this example, the explore will return data for view.field_1, view.field_2, and the count of rows (view.count).

f[]: &f[view.filter_1_dimension]={{ value }}&f[view.filter_2_on_date]=last+60+days
This parameter defines filters for the explore. The f[] syntax is used to declare a filter on a specific dimension (view.filter_1_dimension and view.filter_2_on_date in this case). The {{ value }} placeholder indicates a dynamic value that can be passed through the URL. The second filter uses a Looker expression (last+60+days) to filter data for the past 60 days.

pivots: pivots=view.field_2 This parameter defines the dimension to pivot on. In this example, view.field_2 will be used to create a pivot table.

limit: limit=50 This parameter sets the maximum number of rows to be returned by the explore. The default limit is 5000, but here it's explicitly set to 50.

column_limit: column_limit=20 This parameter sets the maximum number of columns to be displayed in the pivot table. This parameter only has an effect when a pivot dimension is specified (as seen with pivots). The column_limit can be between 1 and 200. Dimensions, dimension table calculations, row total columns, and measure table calculations outside of pivots are not counted toward the column limit. Pivoted groups each count as one column toward the column limit.

total: total=true This parameter controls whether to display column totals in the explore results. Here, true indicates that column totals will be shown.

row_total: row_total=right This parameter controls whether to display row totals in the explore results. Here, right specifies that the row totals will be displayed on the right side. Only use row totals if the chart contains pivots.

sorts: sorts=view.field_1,view.count+desc This parameter defines the order in which the results should be sorted. The first field (view.field_1) is sorted by default in ascending order. The second sort (view.count+desc) sorts the results by view.count in descending order. The +desc syntax specifies descending order.

filter_config: The filter_config parameter contains detailed JSON objects that control the filtering aspects of the query. The filter_config represents the state of the filter UI on the explore page for a given query. When running a query via the Looker UI, this parameter takes precedence over "filters".

Vis: The vis parameter contains detailed JSON objects that control the visualization properties of the query. These properties are typically opaque and differ based on the type of visualization used. There is no specified set of allowed keys. The values can be any type supported by JSON. A "type" key with a string value is often present, and is used by Looker to determine which visualization to present. Visualizations ignore unknown vis_config properties.

Query_timezone: User's timezone, string value.

Subtotals: When using a table visualization and your data table contains at least two dimensions, you can apply subtotals. Subtotals are not available when you filter on a measure or when the Explore uses the sql_always_having parameter. The subtotals parameter is a list of fields on which to run subtotals. The leftmost subtotal is always sorted. When you sort by multiple columns, subtotal columns are given precedence.

# Pivot table reference

In Looker, pivots allow you to turn a selected dimension into several columns, which creates a matrix of your data similar to a pivot table in spreadsheet software. This is very useful for analyzing metrics by different groupings of your data, such as getting counts for category or label in your dataset.

When you pivot on a dimension, each unique possible value of that dimension becomes its own column header. Any measures are then repeated under each column header.

Pivots make it much easier to compare a measure across dimensions. It also shows you gaps in your data, where you don’t have any numeric values for a particular dimension field. In summary, pivots allow you to create and display a matrix of your data, similar to a pivot table in spreadsheet software. Specifically, pivots turn a selected dimension into several columns and are applied only to the visual display of your results.

With pivots, Looker allows you to regroup your data, so that you can easily compare results by different groupings and identify potential gaps, all while leaving your underlying data unaffected.

Whenever you have a question involving one dimension “by” another dimension, that’s a clue that a pivot might come in handy.

When two time dimensions are in a report and a pivot is required, always pivot by the least granular time dimension.

|Example | Pivoted Dimension |
|--------------------------------------------------------------------------|----------------------------------|
| What were the hourly total sales by day in the past 3 days? | Day |
| What were the daily total sales by week in the past 3 weeks? | Week |
| What were the total sales by day each week in the past 2 weeks? | Week |
| What were the total sales by day of week each week in the past 2 weeks? | Week |
| What were the weekly total sales by month in the past 2 months? | Month |
| What were the monthly total sales by quarter in the past 2 quarters? | Quarter |
| What were the monthly total sales by quarter in the past 2 years? | Year |
| What were the total sales by week of year each year in the past 2 years? | Year |
| What were the monthly total sales by year in the past 2 years? | Year |
| What were the weekly total sales by quarter in the past 3 years? | Quarter |

# Looker API JSON Fields

|application/JSON | Datatype | Description
|--------------------------|------------------------|------------------------------------------------------------------------------------------
| can | Hash[boolean] | Operations the current user is able to perform on this object
| id | string | Unique Id
| model | string | Model
| view | string | Explore Name
| fields | string[] | Fields
| pivots | string[] | Pivots
| fill_fields | string[] | Fill Fields
| filters | Hash[string] | Filters will contain data pertaining to complex filters that do not contain "or" conditions. When "or" conditions are present, filter data will be found on the filter_expression property.
| filter_expression | string | Filter Expression
| sorts | string[] | Sorting for the query results. Use the format ["view.field", ...] to sort on fields in ascending order. Use the format ["view.field desc", ...] to sort on fields in descending order. Use ["__UNSORTED__"] (2 underscores before and after) to disable sorting entirely. Empty sorts [] will trigger a default sort.
| limit | string | Row limit. To download unlimited results, set the limit to -1 (negative one).
| column_limit | string | Column Limit
| total | boolean | Total
| row_total | string | Raw Total
| subtotals | string[] | Fields on which to run subtotals
| vis_config | Hash[any] | Visualization configuration properties. These properties are typically opaque and differ based on the type of visualization used. There is no specified set of allowed keys. The values can be any type supported by JSON. A "type" key with a string value is often present, and is used by Looker to determine which visualization to present. Visualizations ignore unknown vis_config properties.
| filter_config | Hash[any] | The filter_config represents the state of the filter UI on the explore page for a given query. When running a query via the Looker UI, this parameter takes precedence over "filters". When creating a query or modifying an existing query, "filter_config" should be set to null. Setting it to any other value could cause unexpected filtering behavior. The format should be considered opaque.
| visible_ui_sections | string | Visible UI Sections
| slug | string | Slug
| dynamic_fields | string | Dynamic Fields
| client_id | string | Client Id: used to generate shortened explore URLs. If set by client, must be a unique 22 character alphanumeric string. Otherwise one will be generated.
| share_url | string | Share Url
| expanded_share_url | string | Expanded Share Url
| url | string | Expanded Url
| query_timezone | string | Query Timezone
| has_table_calculations | boolean | Has Table Calculations
|
Loading