/**
 * @license
 * Copyright 2017 Google LLC
 * SPDX-License-Identifier: BSD-3-Clause
 */
import { noChange } from '../lit-html.js';
import { directive, Directive, PartType } from '../directive.js';
import {
    insertPart,
    getCommittedValue,
    removePart,
    setCommittedValue,
    setChildPartValue,
} from '../directive-helpers.js';
// Helper for generating a map of array item to its index over a subset
// of an array (used to lazily generate `newKeyToIndexMap` and
// `oldKeyToIndexMap`)
const generateMap = (list, start, end) => {
    const map = new Map();
    for (let i = start; i <= end; i++) {
        map.set(list[i], i);
    }
    return map;
};
class RepeatDirective extends Directive {
    constructor(partInfo) {
        super(partInfo);
        if (partInfo.type !== PartType.CHILD) {
            throw new Error('repeat() can only be used in text expressions');
        }
    }
    _getValuesAndKeys(items, keyFnOrTemplate, template) {
        let keyFn;
        if (template === undefined) {
            template = keyFnOrTemplate;
        } else if (keyFnOrTemplate !== undefined) {
            keyFn = keyFnOrTemplate;
        }
        const keys = [];
        const values = [];
        let index = 0;
        for (const item of items) {
            keys[index] = keyFn ? keyFn(item, index) : index;
            values[index] = template(item, index);
            index++;
        }
        return {
            values,
            keys,
        };
    }
    render(items, keyFnOrTemplate, template) {
        return this._getValuesAndKeys(items, keyFnOrTemplate, template).values;
    }
    update(containerPart, [items, keyFnOrTemplate, template]) {
        // Old part & key lists are retrieved from the last update (which may
        // be primed by hydration)
        const oldParts = getCommittedValue(containerPart);
        const { values: newValues, keys: newKeys } = this._getValuesAndKeys(items, keyFnOrTemplate, template);
        // We check that oldParts, the committed value, is an Array as an
        // indicator that the previous value came from a repeat() call. If
        // oldParts is not an Array then this is the first render and we return
        // an array for lit-html's array handling to render, and remember the
        // keys.
        if (!Array.isArray(oldParts)) {
            this._itemKeys = newKeys;
            return newValues;
        }
        // In SSR hydration it's possible for oldParts to be an array but for
        // us to not have item keys because the update() hasn't run yet. We set
        // the keys to an empty array. This will cause all oldKey/newKey
        // comparisons to fail and execution to fall to the last nested branch
        // below which reuses the oldPart.
        const oldKeys = (this._itemKeys ??= []);
        // New part list will be built up as we go (either reused from
        // old parts or created for new keys in this update). This is
        // saved in the above cache at the end of the update.
        const newParts = [];
        // Maps from key to index for current and previous update; these
        // are generated lazily only when needed as a performance
        // optimization, since they are only required for multiple
        // non-contiguous changes in the list, which are less common.
        let newKeyToIndexMap;
        let oldKeyToIndexMap;
        // Head and tail pointers to old parts and new values
        let oldHead = 0;
        let oldTail = oldParts.length - 1;
        let newHead = 0;
        let newTail = newValues.length - 1;
        // Overview of O(n) reconciliation algorithm (general approach
        // based on ideas found in ivi, vue, snabbdom, etc.):
        //
        //   * We start with the list of old parts and new values (and
        //     arrays of their respective keys), head/tail pointers into
        //     each, and we build up the new list of parts by updating
        //     (and when needed, moving) old parts or creating new ones.
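        //
        //   * To ground the diagrams that follow in a concrete (hypothetical)
        //     call, the oldKeys/newKeys arrays shown below would result from
        //     something like:
        //
        //       // previous render: item ids were 0..6; on this render the
        //       // user reordered them and added an item with id 7
        //       repeat(items, (item) => item.id, (item) => html`<li>${item.id}</li>`);
        //
        //     `items` and `item.id` are illustrative names only; any iterable
        //     and key function produce the same parallel key/value arrays.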
        //
        //   * The initial scenario might look like this (for brevity of
        //     the diagrams, the numbers in the array reflect the keys
        //     associated with the old parts or new values, although keys
        //     and parts/values are actually stored in parallel arrays
        //     indexed using the same head/tail pointers):
        //
        //      oldHead v                 v oldTail
        //   oldKeys:  [0, 1, 2, 3, 4, 5, 6]
        //   newParts: [ ,  ,  ,  ,  ,  ,  ]
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6] <- reflects the user's new
        //                                      item order
        //      newHead ^                 ^ newTail
        //
        //   * Iterate old & new lists from both sides, updating,
        //     swapping, or removing parts at the head/tail locations
        //     until neither head nor tail can move.
        //
        //   * Example below: keys at head pointers match, so update old
        //     part 0 in-place (no need to move it) and record part 0 in
        //     the `newParts` list. The last thing we do is advance the
        //     `oldHead` and `newHead` pointers (will be reflected in the
        //     next diagram).
        //
        //      oldHead v                 v oldTail
        //   oldKeys:  [0, 1, 2, 3, 4, 5, 6]
        //   newParts: [0,  ,  ,  ,  ,  ,  ] <- heads matched: update 0
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    and advance both oldHead
        //                                      & newHead
        //      newHead ^                 ^ newTail
        //
        //   * Example below: head pointers don't match, but tail
        //     pointers do, so update part 6 in place (no need to move
        //     it), and record part 6 in the `newParts` list. Last,
        //     advance the `oldTail` and `newTail` pointers.
        //
        //         oldHead v              v oldTail
        //   oldKeys:  [0, 1, 2, 3, 4, 5, 6]
        //   newParts: [0,  ,  ,  ,  ,  , 6] <- tails matched: update 6
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    and advance both oldTail
        //                                      & newTail
        //         newHead ^              ^ newTail
        //
        //   * If neither head nor tail match, next check whether one of
        //     the old head/tail items was removed. We first need to
        //     generate the reverse map of new keys to index
        //     (`newKeyToIndexMap`), which is done once lazily as a
        //     performance optimization, since we only hit this case if
        //     multiple non-contiguous changes were made. Note that for
        //     contiguous removal anywhere in the list, the heads and
        //     tails would advance from either end and pass each other
        //     before we get to this case and removals would be handled
        //     in the final while loop without needing to generate the
        //     map.
        //
        //   * Example below: The key at `oldTail` was removed (no longer
        //     in the `newKeyToIndexMap`), so remove that part from the
        //     DOM and advance just the `oldTail` pointer.
        //
        //         oldHead v           v oldTail
        //   oldKeys:  [0, 1, 2, 3, 4, 5, 6]
        //   newParts: [0,  ,  ,  ,  ,  , 6] <- 5 not in new map: remove
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    5 and advance oldTail
        //         newHead ^           ^ newTail
        //
        //   * Once head and tail cannot move, any mismatches are due to
        //     either new or moved items; if a new key is in the previous
        //     "old key to old index" map, move the old part to the new
        //     location, otherwise create and insert a new part. Note
        //     that when moving an old part we null its position in the
        //     oldParts array if it lies between the head and tail so we
        //     know to skip it when the pointers get there.
        //
        //   * Example below: neither head nor tail match, and neither
        //     were removed; so find the `newHead` key in the
        //     `oldKeyToIndexMap`, and move that old part's DOM into the
        //     next head position (before `oldParts[oldHead]`). Last,
        //     null the part in the `oldParts` array since it was
        //     somewhere in the remaining oldParts still to be scanned
        //     (between the head and tail pointers) so that we know to
        //     skip that old part on future iterations.
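        //
        //     (As a rough illustration for this running example, the lazily
        //     generated maps would at this point hold, key => index:
        //
        //       // newKeyToIndexMap: Map { 2 => 1, 1 => 2, 4 => 3, 3 => 4, 7 => 5 }
        //       // oldKeyToIndexMap: Map { 1 => 1, 2 => 2, 3 => 3, 4 => 4, 5 => 5 }
        //
        //     so `oldKeyToIndexMap.get(2)` is 2, and old part 2 is the one
        //     updated and moved in the diagram below.)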
        //
        //         oldHead v        v oldTail
        //   oldKeys:  [0, 1, -, 3, 4, 5, 6]
        //   newParts: [0, 2,  ,  ,  ,  , 6] <- stuck: update & move 2
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    into place and advance
        //                                      newHead
        //         newHead ^           ^ newTail
        //
        //   * Note that for moves/insertions like the one above, a part
        //     inserted at the head pointer is inserted before the
        //     current `oldParts[oldHead]`, and a part inserted at the
        //     tail pointer is inserted before `newParts[newTail+1]`. The
        //     seeming asymmetry lies in the fact that new parts are
        //     moved into place outside-in, so to the right of the head
        //     pointer are old parts, and to the right of the tail
        //     pointer are new parts.
        //
        //   * We always restart back from the top of the algorithm,
        //     allowing matching and simple updates in place to
        //     continue.
        //
        //   * Example below: the head pointers once again match, so
        //     simply update part 1 and record it in the `newParts`
        //     array. Last, advance both head pointers.
        //
        //         oldHead v        v oldTail
        //   oldKeys:  [0, 1, -, 3, 4, 5, 6]
        //   newParts: [0, 2, 1,  ,  ,  , 6] <- heads matched: update 1
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    and advance both oldHead
        //                                      & newHead
        //            newHead ^        ^ newTail
        //
        //   * As mentioned above, items that were moved as a result of
        //     being stuck (the final else clause in the code below) are
        //     marked with null, so we always advance old pointers over
        //     these so we're comparing the next actual old value on
        //     either end.
        //
        //   * Example below: `oldHead` is null (already placed in
        //     newParts), so advance `oldHead`.
        //
        //            oldHead v     v oldTail
        //   oldKeys:  [0, 1, -, 3, 4, 5, 6] <- old head already used:
        //   newParts: [0, 2, 1,  ,  ,  , 6]    advance oldHead
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]
        //               newHead ^     ^ newTail
        //
        //   * Note it's not critical to mark old parts as null when they
        //     are moved from head to tail or tail to head, since they
        //     will be outside the pointer range and never visited again.
        //
        //   * Example below: Here the old tail key matches the new head
        //     key, so update the part at the `oldTail` position and move
        //     its DOM to the new head position (before
        //     `oldParts[oldHead]`). Last, advance the `oldTail` and
        //     `newHead` pointers.
        //
        //               oldHead v  v oldTail
        //   oldKeys:  [0, 1, -, 3, 4, 5, 6]
        //   newParts: [0, 2, 1, 4,  ,  , 6] <- old tail matches new
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    head: update & move 4,
        //                                      advance oldTail & newHead
        //               newHead ^     ^ newTail
        //
        //   * Example below: Old and new head keys match, so update the
        //     old head part in place, and advance the `oldHead` and
        //     `newHead` pointers.
        //
        //               oldHead v oldTail
        //   oldKeys:  [0, 1, -, 3, 4, 5, 6]
        //   newParts: [0, 2, 1, 4, 3,  , 6] <- heads match: update 3
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]    and advance oldHead &
        //                                      newHead
        //                  newHead ^  ^ newTail
        //
        //   * Once the new or old pointers move past each other then all
        //     we have left is additions (if old list exhausted) or
        //     removals (if new list exhausted). Those are handled in the
        //     final while loops at the end.
        //
        //   * Example below: `oldHead` exceeded `oldTail`, so we're done
        //     with the main loop. Create the remaining part and insert
        //     it at the new head position, and the update is complete.
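        //
        //     (In the running example only key 7 remains at this point; the
        //     first cleanup loop after the main `while` creates its part via
        //     `insertPart(containerPart, newParts[newTail + 1])`, i.e. it is
        //     inserted before the already-placed part 6, as shown below.)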
        //
        //                    (oldHead > oldTail)
        //   oldKeys:  [0, 1, -, 3, 4, 5, 6]
        //   newParts: [0, 2, 1, 4, 3, 7, 6] <- create and insert 7
        //   newKeys:  [0, 2, 1, 4, 3, 7, 6]
        //                     newHead ^ newTail
        //
        //   * Note that the order of the if/else clauses is not
        //     important to the algorithm, as long as the null checks
        //     come first (to ensure we're always working on valid old
        //     parts) and that the final else clause comes last (since
        //     that's where the expensive moves occur). The order of the
        //     remaining clauses is just a simple guess at which cases
        //     will be most common.
        //
        //   * Note, we could calculate the longest increasing
        //     subsequence (LIS) of old items in new positions, and only
        //     move those not in the LIS set. However that costs
        //     O(n log n) time and adds a bit more code, and only helps
        //     make rare types of mutations require fewer moves. The
        //     above handles removes, adds, reversal, swaps, and single
        //     moves of contiguous items in linear time, in the minimum
        //     number of moves. As the number of multiple moves where LIS
        //     might help approaches a random shuffle, the LIS
        //     optimization becomes less helpful, so it seems not worth
        //     the code at this point. Could reconsider if a compelling
        //     case arises.
        while (oldHead <= oldTail && newHead <= newTail) {
            if (oldParts[oldHead] === null) {
                // `null` means old part at head has already been used
                // below; skip
                oldHead++;
            } else if (oldParts[oldTail] === null) {
                // `null` means old part at tail has already been used
                // below; skip
                oldTail--;
            } else if (oldKeys[oldHead] === newKeys[newHead]) {
                // Old head matches new head; update in place
                newParts[newHead] = setChildPartValue(oldParts[oldHead], newValues[newHead]);
                oldHead++;
                newHead++;
            } else if (oldKeys[oldTail] === newKeys[newTail]) {
                // Old tail matches new tail; update in place
                newParts[newTail] = setChildPartValue(oldParts[oldTail], newValues[newTail]);
                oldTail--;
                newTail--;
            } else if (oldKeys[oldHead] === newKeys[newTail]) {
                // Old head matches new tail; update and move to new tail
                newParts[newTail] = setChildPartValue(oldParts[oldHead], newValues[newTail]);
                insertPart(containerPart, newParts[newTail + 1], oldParts[oldHead]);
                oldHead++;
                newTail--;
            } else if (oldKeys[oldTail] === newKeys[newHead]) {
                // Old tail matches new head; update and move to new head
                newParts[newHead] = setChildPartValue(oldParts[oldTail], newValues[newHead]);
                insertPart(containerPart, oldParts[oldHead], oldParts[oldTail]);
                oldTail--;
                newHead++;
            } else {
                if (newKeyToIndexMap === undefined) {
                    // Lazily generate key-to-index maps, used for removals
                    // & moves below
                    newKeyToIndexMap = generateMap(newKeys, newHead, newTail);
                    oldKeyToIndexMap = generateMap(oldKeys, oldHead, oldTail);
                }
                if (!newKeyToIndexMap.has(oldKeys[oldHead])) {
                    // Old head is no longer in new list; remove
                    removePart(oldParts[oldHead]);
                    oldHead++;
                } else if (!newKeyToIndexMap.has(oldKeys[oldTail])) {
                    // Old tail is no longer in new list; remove
                    removePart(oldParts[oldTail]);
                    oldTail--;
                } else {
                    // Any mismatches at this point are due to additions or
                    // moves; see if we have an old part we can reuse and move
                    // into place
                    const oldIndex = oldKeyToIndexMap.get(newKeys[newHead]);
                    const oldPart = oldIndex !== undefined ?
                        oldParts[oldIndex] : null;
                    if (oldPart === null) {
                        // No old part for this value; create a new one and
                        // insert it
                        const newPart = insertPart(containerPart, oldParts[oldHead]);
                        setChildPartValue(newPart, newValues[newHead]);
                        newParts[newHead] = newPart;
                    } else {
                        // Reuse old part
                        newParts[newHead] = setChildPartValue(oldPart, newValues[newHead]);
                        insertPart(containerPart, oldParts[oldHead], oldPart);
                        // This marks the old part as having been used, so that
                        // it will be skipped in the first two checks above
                        oldParts[oldIndex] = null;
                    }
                    newHead++;
                }
            }
        }
        // Add parts for any remaining new values
        while (newHead <= newTail) {
            // For all remaining additions, we insert before last new
            // tail, since old pointers are no longer valid
            const newPart = insertPart(containerPart, newParts[newTail + 1]);
            setChildPartValue(newPart, newValues[newHead]);
            newParts[newHead++] = newPart;
        }
        // Remove any remaining unused old parts
        while (oldHead <= oldTail) {
            const oldPart = oldParts[oldHead++];
            if (oldPart !== null) {
                removePart(oldPart);
            }
        }
        // Save order of new parts for next round
        this._itemKeys = newKeys;
        // Directly set part value, bypassing its dirty-checking
        setCommittedValue(containerPart, newParts);
        return noChange;
    }
}
/**
 * A directive that repeats a series of values (usually `TemplateResults`)
 * generated from an iterable, and updates those items efficiently when the
 * iterable changes based on user-provided `keys` associated with each item.
 *
 * Note that if a `keyFn` is provided, strict key-to-DOM mapping is maintained,
 * meaning previous DOM for a given key is moved into the new position if
 * needed, and DOM will never be reused with values for different keys (new DOM
 * will always be created for new keys). This is generally the most efficient
 * way to use `repeat` since it performs minimum unnecessary work for
 * insertions and removals.
 *
 * The `keyFn` takes two parameters, the item and its index, and returns a
 * unique key value.
 *
 * ```js
 * html`
 *