## When is Huffman coding inefficient?

I have a question regarding the redundancy of Huffman coding. I know that for a general prefix code we have the following inequality:

$$H(X) \le R \le H(X) + 1$$

R being the rate (average codeword length) and H the entropy. Based on this relation, how can we conclude that Huffman coding is very inefficient if the entropy of the source is much smaller than 1 bit/symbol?

## First Huffman Compression Algorithm in C++

I decided to take up learning C++ recently, so I coded a Huffman compression algorithm. I’m particularly interested in critique of my C++ techniques (pointers, references, best practices, etc.), but I am open to any comments you have.

Thanks for your time!

// NOTE(review): the original paste lost every "[...]" array subscript during
// extraction; the indices restored below ([0], [1], children[2], content[0],
// argv-style accesses) reconstruct the intended, compilable program.
#include <map>
#include <iostream>
#include <vector>
#include <deque>
#include <string>
#include <algorithm>

typedef std::pair<char, int> weight_pair;

// A node of the Huffman tree. Leaves hold a single character in `content`;
// internal nodes hold the concatenation of their descendants' characters.
struct node {
    int weight;                        // total frequency of this subtree
    node *parent = NULL;
    node *children[2] = {NULL, NULL};  // children[0] = "0" branch, children[1] = "1" branch
    std::string content;
};

// Orders nodes by ascending weight (used to bring the two lightest to the front).
bool compareNodeWeights(const node *node1, const node *node2) {
    return (node1->weight) < (node2->weight);
}

// Builds a binary tree from an alphabet and an associated set of weights.
// The caller owns the returned tree and must release it with delete_tree().
node *build_tree(const std::map<char, int> &weights) {
    std::deque<node*> nodes;

    // One leaf node per symbol.
    for (auto const &x : weights) {
        node *leaf_node = new node;
        leaf_node->weight = x.second;
        leaf_node->content = std::string(1, x.first);
        nodes.push_back(leaf_node);
    }

    // Repeatedly merge the two lightest nodes until a single root remains.
    while (nodes.size() > 1) {
        std::sort(nodes.begin(), nodes.end(), compareNodeWeights);

        node *new_node = new node;
        new_node->weight = nodes[0]->weight + nodes[1]->weight;
        new_node->content = nodes[0]->content + nodes[1]->content;
        nodes[0]->parent = new_node;
        nodes[1]->parent = new_node;
        new_node->children[0] = nodes[0];
        new_node->children[1] = nodes[1];

        nodes.erase(nodes.begin());
        nodes.erase(nodes.begin());
        nodes.push_back(new_node);
    }

    return nodes[0];
}

// Traverses the tree upwards to find the prefix code for a given leaf node.
std::deque<bool> find_prefix_code(const node *leaf) {
    std::deque<bool> result;
    const node *curr_node = leaf;

    while (curr_node->parent != NULL) {
        // A left child (children[0]) contributes a 0 bit, a right child a 1 bit.
        if (curr_node->parent->children[0]->content == curr_node->content) {
            result.push_front(0);
        } else {
            result.push_front(1);
        }
        curr_node = curr_node->parent;
    }

    return result;
}

// Encodes `message` as a bit sequence using the prefix codes of `tree`.
std::vector<bool> compress(const std::string &message, const node *tree) {
    std::vector<bool> result;

    // Breadth-first walk of the tree collecting the prefix code of every leaf.
    std::vector<const node*> open;
    open.push_back(tree);
    std::map<char, std::deque<bool>> prefix_codes;

    while (open.size() > 0) {
        const node *curr_node = open[0];
        if (curr_node->content.size() == 1) {
            // Leaf: map its single character to its code.
            prefix_codes.insert(std::pair<char, std::deque<bool>>(
                curr_node->content[0], find_prefix_code(curr_node)));
        } else {
            open.push_back(curr_node->children[0]);
            open.push_back(curr_node->children[1]);
        }
        open.erase(open.begin());
    }

    // Concatenate the code of each message character.
    for (const char c : message) {
        for (const bool &b : prefix_codes[c]) {
            result.push_back(b);
        }
    }

    return result;
}

// Decodes a bit sequence back into a string by walking the tree from the root.
std::string decompress(const std::vector<bool> &data, const node *tree) {
    const node *curr_node = tree;
    std::string result = "";

    for (const bool b : data) {
        int direction = b;
        curr_node = curr_node->children[direction];

        // Reached a leaf: emit its character and restart at the root.
        if (curr_node->content.size() == 1) {
            result += curr_node->content;
            curr_node = tree;
        }
    }

    return result;
}

// Prints the encoded bit sequence to stdout.
void print_compressed(const std::vector<bool> &data) {
    std::cout << "Compressed data: ";
    for (const bool b : data) {
        std::cout << b;
    }
    std::cout << std::endl;
}

// Recursively releases every node allocated by build_tree().
void delete_tree(node *tree) {
    for (int i = 0; i <= 1; i++) {
        if (tree->children[i] != NULL) {
            delete_tree(tree->children[i]);
        }
    }
    delete tree;
}

int main() {
    std::map<char, int> weights;
    weights.insert(weight_pair(' ', 3));
    weights.insert(weight_pair('a', 3));
    weights.insert(weight_pair('d', 3));
    weights.insert(weight_pair('b', 1));
    weights.insert(weight_pair('c', 1));

    node *tree = build_tree(weights);

    std::vector<bool> compressed_message = compress("a cab dab bad", tree);
    print_compressed(compressed_message);

    std::string message = decompress(compressed_message, tree);
    std::cout << "Decompressed data: " << message << std::endl;

    delete_tree(tree);
    return 0;
}
Posted on Categories buy elite proxies

## Huffman Codes C++

This is my version of the Huffman Codes. I really appreciate all your advices and I thank you in advance for taking your time to read through my code.

Cheers, SM

/*
    Author: Stevan Milic
    Date: 05.05.2018.
    Course: Data Structures II
    Professor: Dr. Claude Chaudet
    Description: Huffman Codes

    NOTE(review): the paste lost the "[...]" subscripts; they are restored
    below. Also fixed: sizeof(arr)/sizeof(arr) (always 1) -> sizeof(arr[0]),
    and the printed frequency table, whose letter labels had drifted out of
    sync with the freq[] array.
*/
#include <iostream>
#include <cstdlib>
using namespace std;
#define MAX_TREE_HEIGHT 1000

// A Huffman tree node
struct MinHeapNode
{
    char codeword;  // the character this node encodes (alphabetic letters here)
    // unsigned, because a frequency can never be negative
    unsigned freq;  // how many times the character occurs
    struct MinHeapNode *left, *right; // left and right children
};

// Collection of nodes organised as a binary min-heap
struct MinHeap
{
    unsigned size;              // current number of nodes in the heap
    unsigned capacity;          // maximum number of nodes
    struct MinHeapNode** array; // array of heap node pointers
};

// Dynamically allocates a new heap node with the given character and frequency
struct MinHeapNode* newHeapNode(char codeword, unsigned freq)
{
    struct MinHeapNode* temp = (struct MinHeapNode*)malloc(sizeof(struct MinHeapNode));
    temp->left = temp->right = NULL;
    temp->codeword = codeword;
    temp->freq = freq;
    return temp;
}

// Creates a new dynamically allocated min heap with the given capacity
struct MinHeap* createMinHeap(unsigned capacity)
{
    struct MinHeap* minHeap = (struct MinHeap*)malloc(sizeof(struct MinHeap));
    minHeap->size = 0;
    minHeap->capacity = capacity;
    minHeap->array = (struct MinHeapNode**)malloc(minHeap->capacity * sizeof(struct MinHeapNode*));
    return minHeap;
}

// Swaps two min heap node pointers
void swap(struct MinHeapNode** a, struct MinHeapNode** b)
{
    struct MinHeapNode* temp2 = *a;
    *a = *b;
    *b = temp2;
}

// Restores the min-heap property for the subtree rooted at `index`
void minHeapify(struct MinHeap* minHeap, int index)
{
    int smallest = index;
    int leftSon = 2 * index + 1;
    int rightSon = 2 * index + 2;

    // (int) casts silence the signed/unsigned comparison warning
    if (leftSon < (int)minHeap->size && minHeap->array[leftSon]->freq < minHeap->array[smallest]->freq)
        smallest = leftSon;

    if (rightSon < (int)minHeap->size && minHeap->array[rightSon]->freq < minHeap->array[smallest]->freq)
        smallest = rightSon;

    if (smallest != index)
    {
        swap(&minHeap->array[smallest], &minHeap->array[index]);
        minHeapify(minHeap, smallest);
    }
}

// Checking if the size of the heap is 1
int heapSizeOne(struct MinHeap* minHeap)
{
    return (minHeap->size == 1);
}

// Extracting the minimum value node (the root) from the heap
struct MinHeapNode* extractMin(struct MinHeap* minHeap)
{
    struct MinHeapNode* temp = minHeap->array[0];
    // Move the last node to the root, shrink, then sift it down
    minHeap->array[0] = minHeap->array[minHeap->size - 1];
    --minHeap->size;
    minHeapify(minHeap, 0);
    return temp;
}

// Inserting a new node into the min heap (sift up from the last slot)
void insert(struct MinHeap* minHeap, struct MinHeapNode* minHeapNode)
{
    ++minHeap->size;
    int i = minHeap->size - 1;
    while (i && minHeapNode->freq < minHeap->array[(i - 1) / 2]->freq)
    {
        minHeap->array[i] = minHeap->array[(i - 1) / 2];
        i = (i - 1) / 2;
    }
    minHeap->array[i] = minHeapNode;
}

// Build function to build the min heap (heapify every internal node)
void build(struct MinHeap* minHeap)
{
    int n = minHeap->size - 1;
    for (int i = (n - 1) / 2; i >= 0; --i)
        minHeapify(minHeap, i);
}

// Display function to print the first n entries of an array of bits
void display(int arr[], int n)
{
    int i;
    for (i = 0; i < n; ++i)
        cout << arr[i];
    cout << "\n";
}

// Checking whether the node is a leaf (no children)
int isLeaf(struct MinHeapNode* root)
{
    return !(root->left) && !(root->right);
}

// Creates a min heap with capacity equal to size and inserts all the
// codewords and their frequencies.
struct MinHeap* create(char codeword[], int freq[], int size)
{
    struct MinHeap* minHeap = createMinHeap(size);
    for (int i = 0; i < size; ++i)
        minHeap->array[i] = newHeapNode(codeword[i], freq[i]);
    minHeap->size = size;
    build(minHeap);
    return minHeap;
}

// Function that builds the Huffman tree
struct MinHeapNode* buildHT(char codeword[], int freq[], int size)
{
    struct MinHeapNode *left, *right, *top;

    // Min heap seeded with all codewords and their frequencies
    struct MinHeap* minHeap = create(codeword, freq, size);

    // Runs until only the root remains in the heap
    while (!heapSizeOne(minHeap))
    {
        // Getting the two minimums from the min heap
        left = extractMin(minHeap);
        right = extractMin(minHeap);

        // The frequency of top is the sum of the frequencies of left and
        // right. '_' marks an internal (non-leaf) node.
        top = newHeapNode('_', left->freq + right->freq);
        top->left = left;
        top->right = right;
        insert(minHeap, top);
    }
    // The remaining node is the root, which completes the tree
    return extractMin(minHeap);
}

// Displays Huffman codes by walking the tree from the root;
// arr[] accumulates the bits of the current path, `top` is its length.
void displayHC(struct MinHeapNode* root, int arr[], int top)
{
    // Left side is given the value 0
    if (root->left)
    {
        arr[top] = 0;
        displayHC(root->left, arr, top + 1);
    }
    // Right side is given the value 1
    if (root->right)
    {
        arr[top] = 1;
        displayHC(root->right, arr, top + 1);
    }
    // If this is a leaf node, print the character and its code.
    if (isLeaf(root))
    {
        cout << root->codeword << ": ";
        display(arr, top);
    }
}

// Building a Huffman tree and displaying the codes
void HuffmanCodes(char codeword[], int freq[], int size)
{
    // Building a HT
    struct MinHeapNode* root = buildHT(codeword, freq, size);

    // Displaying the HT we built
    int arr[MAX_TREE_HEIGHT], top = 0;
    displayHC(root, arr, top);
}

// Example taken from the course presentation ("The Huffman Coding")
int main()
{
    // This table now matches freq[] below; the original paste printed
    // letter labels that had drifted out of sync with the values.
    cout << "A|4\t B|0\t C|2\t D|1\t E|5\t F|1\t G|0\t H|1\t I|1\t J|0\t K|0\t L|3\t M|2\t N|0\t\nO|1\t P|2\t Q|0\t R|3\t S|5\t T|4\t U|2\t V|0\t W|0\t X|1\t Y|0\t Z|0\t _|6\n" << endl;
    char arr[] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '_' };
    int freq[] = { 4, 0, 2, 1, 5, 1, 0, 1, 1, 0, 0, 3, 2, 0, 1, 2, 0, 3, 5, 4, 2, 0, 0, 1, 0, 0, 6 };

    // Element count: the divisor must be the size of ONE element
    int size = sizeof(arr) / sizeof(arr[0]);

    HuffmanCodes(arr, freq, size);

    cout << "\n\n";
    system("pause"); // Windows-only pause; fails harmlessly elsewhere
    return 0;
}

## Huffman – Upperbound Depth Given an alphabet with probabilities

Given an alphabet {C1, …, Ck} where the probability of the letter Ck is P(Ck). The probabilities satisfy P(C1) > … > P(Ck) (i.e. all probabilities are distinct). Derive an upper bound for the depth of C1 in a Huffman tree.

Posted on Categories proxies

## Are Huffman codes self-synchronizing?

A code is (statistically) self-synchronizing if, given that the transmitted string is long enough, the receiver is guaranteed to eventually synchronize with the sender, even if bit flips or slips have occurred.

Do Huffman codes have this property in general? In case not, is there a criterion for testing if a Huffman code is self-synchronizing or, equivalently, is there a modified construction of a Huffman code which guarantees self-synchronization?

Posted on Categories proxies

## Huffman encoding with several codes already assigned

The classic Huffman algorithm, as Wikipedia states, finds an optimal prefix-free binary code with minimum expected codeword length, given a set of symbols and their weights. Now, suppose codewords for some (but not all) symbols are already assigned (maybe in a suboptimal way). How should we assign the remaining codewords to the remaining symbols so as to minimize the expected codeword length, assuming that there exists at least one binary sequence such that no existing codeword has it as a prefix, and it is not a prefix of any existing codeword?

I wonder if there’s a known solution to this problem. Yet I have been trying to develop an own one borrowing ideas from this proof.

Posted on Categories proxies

## Infinite Huffman Tree

We have to derive an optimal binary encoding for the infinite set of symbols $$\{s_1, s_2, \dots \}$$. Their distribution is given by $$p(s_i) = 9 \cdot 10^{-i}$$

My intuition was to use a Huffman encoding. Obviously the Huffman encoding algorithm needs to first find the smallest two elements, which we can’t do. But, if we imagine the last step of the Huffman algorithm, it would have the two elements $$s_1$$ and the supersymbol $$s_2s_3s_4\dots$$ with probabilities $$0.9$$ and $$0.1$$ respectively. Thus, (if left is $$0$$ and right is $$1$$) symbol $$s_1$$ would get codeword $$0$$. The other symbols would all get a codeword starting with $$1$$. This tree is clearly self similar. If we represent binary trees as tuples with an empty tree as the nullset $$\varnothing$$, we have that this tree is given by

$$T = (\varnothing, T)$$

That is, a left branch which is empty and a right branch which is identical to the whole tree. Thus, we get the codewords

$$s_1 \mapsto 0, s_2 \mapsto 10, s_3 \mapsto 110, s_4 \mapsto 1110, \dots$$

Now, I know that these are optimal. You can do a proof by contradiction showing that changing any of these codewords and still satisfying the Kraft inequality will cause the expected length to increase.

My question is, is the fact that this infinite Huffman tree arrived at the answer a coincidence or is there a way to formalize Huffman encoding for an infinite set of symbols.

## Huffman tree compressing/decompressing in C

In a past course one of the assignments was to write a program that can compress files using Huffman Tree algorithm, and uncompress the files that the program generates.

My design is to count the byte occurrences first, then construct a HT based on the counted byte frequency.

My compressed file format is 256*4 bytes of “header” that stores the counted frequency, so it can be used to construct the tree when decompressing the file. Then there’s a 4-byte integer that indicates how many bits of the last byte is real data. The rest is the real (compressed) data.

Here is this specific version* of code that I want some feedback. Later versions introduced many messy changes (like GUI and buffered I/O) that is not necessary.

Specifically, I’m looking for feedback on my algorithm and data structure implementation, including but not limited to code style, best practices, potential flaws and defects (see below).

• An exception is the last two functions print_help and main. They’re intended to be as simple as possible, so they contain the bare minimum amount of code to work in a reasonable way. Data validation and error checking etc. are omitted on purpose.

In order to simplify the idea, during designing and coding, I have assumed that

• the program will not be told to uncompress an invalid file, so there’s no file validity check in the code
• file availability is ensured by the environment. It will always be a regular file, with no chance of generating a read error mid-way
• C library functions do not fail for environmental reasons (e.g. the host is short of RAM for malloc(3), the target disk is out of space for fwrite(3) and consequently write(2), or fread(3) as said above)
• reading/writing byte-by-byte is fine, because a later version of this code introduced chunk I/O and got a bit messier (I think). Suggestions on making the code run faster without implementing chunk I/O is welcome

so I’m also not looking for feedback regarding the above things that I have assumed / intentionally ignored.

I have ensured that the code is working properly, with no warnings when compiled with this command (taken from make output)

gcc -O3 -std=c11 -Wall -Wno-unused-result -o huffman huffman.c 

The last option is to suppress the warning about unused result from fread(3).

During my coding process, I run clang-format occasionally and diff the output and my written code to check for potentially bad indentation / styling issues. I am not confident if it can solve everything.

* The link points to my GitHub repo. The code on that page is identical to the code submitted below verbatim.

// File: huffman.c
// Author: iBug
//
// NOTE(review): the paste lost every "[...]" subscript. The array
// declarations and indices below (freq[256], node[256], lower[]/upper[],
// code[256], tree[512], argv[1..3]) restore the intended program.
// Also fixed: freq[] in compress_file is now zero-initialized —
// count_frequency only increments, so it must start from zero.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

typedef unsigned char byte;

// Tree node. In a tree[512] array, entries 0..255 are the leaves for each
// possible byte value and 256..510 are internal nodes; the root is tree[510].
typedef struct _HuffNode {
    unsigned data;
    struct _HuffNode *left, *right, *parent;
} HuffNode;

// Counts byte occurrences of fp (from the current position to EOF) into
// freq[256], then restores the original file position.
void count_frequency(FILE* fp, unsigned* freq) {
    size_t orig_pos = ftell(fp);
    int ch;
    while (1) {
        ch = fgetc(fp);
        if (ch < 0)
            break;
        freq[ch]++;
    }
    fseek(fp, orig_pos, SEEK_SET);
}

// Builds the Huffman tree over all 256 byte values into tree[512].
void construct_huffman(unsigned* freq_in, HuffNode* tree) {
    int count = 256;
    unsigned freq[256];
    HuffNode *node[256];

    // Initialize data
    for (int i = 0; i < 256; i++) {
        freq[i] = freq_in[i];
        tree[i].data = i;
        tree[i].left = tree[i].right = NULL;
        node[i] = &tree[i];
    }

    // Sort by frequency, decreasing order
    /* WARNING: Although this Quick Sort is an unstable sort,
     * it should at least give the same result for the same input frequency table,
     * therefore I'm leaving this code here
     */
    {
        // Explicit stack of [lower, upper) ranges still to be sorted
        unsigned lower[512], upper[512], top = 1;
        lower[0] = 0, upper[0] = 256;
        while (top > 0) {
            top--;
            int left = lower[top], right = upper[top];
            int i = left, j = right - 1, flag = 0;
            if (i >= j) // Nothing to sort
                continue;
            while (i < j) {
                if (freq[i] < freq[j]) {
                    unsigned t = freq[i]; freq[i] = freq[j]; freq[j] = t;
                    HuffNode *p = node[i]; node[i] = node[j]; node[j] = p;
                    flag = !flag;
                }
                flag ? i++ : j--;
            }
            lower[top] = left, upper[top] = i;
            lower[top + 1] = i + 1, upper[top + 1] = right;
            top += 2;
        }
    }

    // Construct tree
    while (count > 1) {
        int pos = 512 - count;
        HuffNode *parent = &tree[pos];
        // Select lowest 2 by freq (list is sorted decreasing, so they are last)
        int i = count - 2, j = count - 1;
        // Create tree, lower freq left
        parent->left = node[j]; parent->right = node[i];
        node[j]->parent = node[i]->parent = parent;
        node[i] = parent;
        freq[i] += freq[j];
        // Re-insert the merged node to keep the list sorted decreasing
        for (; i > 0 && freq[i] > freq[i - 1]; i--) {
            unsigned t = freq[i]; freq[i] = freq[i - 1]; freq[i - 1] = t;
            HuffNode *p = node[i]; node[i] = node[i - 1]; node[i - 1] = p;
        }
        count--;
    }
    // Now HEAD = node[0] = &tree[510]
    node[0]->parent = NULL;
}

// Encodes fin into fout using the parent pointers of `tree`.
// *padding receives the number of unused bits in the final output byte.
void encode_stream(FILE* fin, FILE* fout, HuffNode* tree, unsigned* padding) {
    int n;
    byte ch;
    byte buf = 0, nbuf = 0;
    HuffNode *p;
    byte code[256]; // bits of one code word, collected leaf -> root
    while (1) {
        n = fgetc(fin);
        if (n < 0)
            break;
        ch = n;

        // Encode: walk from the leaf up, recording 0 for left, 1 for right
        p = &tree[ch];
        n = 0;
        while (p->parent) {
            if (p == p->parent->left) {
                // Left is 0
                code[n] = 0;
            } else if (p == p->parent->right) {
                code[n] = 1;
            }
            p = p->parent;
            n++;
        }

        // Write the bits root -> leaf, LSB-first within each byte
        for (int i = n - 1; i >= 0; i--) {
            buf |= code[i] << nbuf;
            nbuf++;
            if (nbuf == 8) {
                fputc(buf, fout);
                nbuf = buf = 0;
            }
        }
    }
    fputc(buf, fout); // flush the (possibly partial) last byte
    *padding = 8 - nbuf;
}

// Decodes fin (positioned just past the header) into fout.
// `padding` is the number of unused bits in the stream's last byte.
void decode_stream(FILE* fin, FILE* fout, HuffNode* tree, unsigned padding) {
    size_t startpos = ftell(fin); // should be 1028
    fseek(fin, 0L, SEEK_END);
    size_t endpos = ftell(fin); // last byte handling
    fseek(fin, startpos, SEEK_SET);
    int count = endpos - startpos;

    byte buf = 0, nbuf = 0, bit;
    HuffNode *p;
    while (count > 0 || nbuf > 0) {
        // Start from the tree root
        p = tree + 510;
        while (p->left || p->right) {
            // Fetch the next input byte if the bit buffer is empty
            if (nbuf == 0) {
                if (count <= 0)
                    return;

                buf = fgetc(fin);
                if (count == 1) {
                    // Last byte: only (8 - padding) bits are real data
                    nbuf = 8 - padding;
                    if (nbuf == 0) {
                        return;
                    }
                } else {
                    nbuf = 8;
                }
                count--;
            }
            // Consume one bit, LSB first
            bit = buf & 1;
            buf >>= 1;
            nbuf--;
            if (bit == 0)
                p = p->left;
            else
                p = p->right;
        }
        fputc(p->data, fout);
    }
}

// Compresses `filename` into `newname`: a 256*4-byte frequency header,
// a 4-byte padding field, then the encoded bit stream.
void compress_file(const char* filename, const char* newname) {
    FILE *fin = fopen(filename, "rb"),
         *fout = fopen(newname, "wb");

    unsigned freq[256] = {0}, padding;
    HuffNode tree[512];
    size_t padding_pos;
    count_frequency(fin, freq);
    construct_huffman(freq, tree);
    rewind(fin);
    // Header: 256 host-order 4-byte frequencies (assumes 4-byte unsigned)
    for (int i = 0; i < 256; i++)
        fwrite(freq + i, 4, 1, fout);
    // Write a placeholder for the padding
    padding_pos = ftell(fout);
    fwrite(&padding, 4, 1, fout);
    encode_stream(fin, fout, tree, &padding);
    // Write the real padding into the placeholder
    fseek(fout, padding_pos, SEEK_SET);
    fwrite(&padding, 4, 1, fout);
    fclose(fin);
    fclose(fout);
}

// Reverses compress_file: reads the header, rebuilds the tree, decodes.
void uncompress_file(const char* filename, const char* newname) {
    FILE *fin = fopen(filename, "rb"),
         *fout = fopen(newname, "wb");

    unsigned freq[256], padding;
    HuffNode tree[512];
    for (int i = 0; i < 256; i++) {
        fread(&padding, 4, 1, fin);
        freq[i] = padding;
    }
    fread(&padding, 4, 1, fin);
    construct_huffman(freq, tree);
    decode_stream(fin, fout, tree, padding);
    fclose(fin);
    fclose(fout);
}

void print_help(void) {
    puts("Usage: huffman (-c|-d) input output");
    puts("  -c    Compress file from input to output");
    puts("  -d    Uncompress file from input to output");
    puts("\nCreated by iBug");
}

int main(int argc, char** argv) {
    if (argc != 4) {
        print_help();
        return 1;
    }
    if (!strcmp(argv[1], "-c")) {
        compress_file(argv[2], argv[3]);
    } else if (!strcmp(argv[1], "-d")) {
        uncompress_file(argv[2], argv[3]);
    } else {
        print_help();
        return 1;
    }
    return 0;
}

In addition to the mandatory CC BY-SA 3.0 license by posting on Stack Exchange, the code itself also has a MIT license.

On a side note: Although the course has ended and this code is not maintained anymore, it’s still one of the programs that I have written with maximum attention and carefulness, so I believe that any feedback to this code is highly valuable and I will remember them in my future C-coding times.

Posted on Categories buy elite proxies

## Huffman Coding API in Swift

This question is a follow up question to: this post

I’d love some advice on my implementation and the API of the Huffman class.

I’m also not sure how to test if my implementation is actually resulting in less bytes than a string. It seems that encoded.count (as a Data) is larger than word.utf8.count (as a String). Maybe I’m just not testing on large enough strings?

Also any thoughts on HuffData storing the code ex. ["00","01"] and the frequencyTable instead of the tree.

Here’s an example of how the API is used:

let word = "MISSISSIPPI_RIVER!" let encoded = try? Huffman.encode(word) let decoded = try? Huffman.decode(encoded!) XCTAssertEqual(decoded, word) 

Here’s the code:

import Foundation

/// Payload serialized to `Data`: the encoded message (one code word per
/// input character) plus the character -> code table needed to decode it.
struct HuffData: Codable {
    var code: [String]
    var frequencyTable: [String: String]
}

class Huffman {

    /// Decodes data previously produced by `encode(_:)`.
    /// - Throws: A `DecodingError` if `data` is not valid `HuffData` JSON.
    static func decode(_ data: Data) throws -> String {
        let huff = try JSONDecoder().decode(HuffData.self, from: data)
        // Invert the character -> code table; codes are prefix-free, hence unique.
        let reverseTable = Dictionary(uniqueKeysWithValues: zip(huff.frequencyTable.values, huff.frequencyTable.keys))
        return huff.code.compactMap { reverseTable[$0] }.joined()
    }

    /// Encodes `input` and returns the JSON-serialized `HuffData`.
    /// - Throws: An `EncodingError` if serialization fails.
    static func encode(_ input: String) throws -> Data {
        let frequencyTable = Huffman.buildFrequencyTable(for: input)
        let code = input.compactMap { frequencyTable[String($0)] }
        let huff = HuffData(code: code, frequencyTable: frequencyTable)
        return try JSONEncoder().encode(huff)
    }

    /// Builds the character -> code table for `input` by counting character
    /// frequencies and walking the resulting Huffman tree.
    /// NOTE(review): a fully empty `input` still crashes in `createTree`
    /// (pre-existing behavior, kept to preserve the interface).
    static private func buildFrequencyTable(for input: String) -> [String: String] {
        // count letter frequency
        let frequencies = input.reduce(into: [String: Int]()) { freq, char in
            freq[String(char), default: 0] += 1
        }
        // create queue of initial Nodes
        let queue = frequencies.map { Node(name: $0.key, value: $0.value) }
        // generate key by traversing tree
        return Huffman.generateKey(for: Huffman.createTree(with: queue), prefix: "")
    }

    /// Recursively assigns codes: "0" is appended on the left branch,
    /// "1" on the right branch.
    static private func generateKey(for node: Node, prefix: String) -> [String: String] {
        var key = [String: String]()
        if let left = node.left, let right = node.right {
            key.merge(generateKey(for: left, prefix: prefix + "0"), uniquingKeysWith: { current, _ in current })
            key.merge(generateKey(for: right, prefix: prefix + "1"), uniquingKeysWith: { current, _ in current })
        } else {
            // Fix: a lone symbol would otherwise get the empty code, so
            // single-character inputs could not round-trip. Give it "0".
            key[node.name] = prefix.isEmpty ? "0" : prefix
        }
        return key
    }

    /// Merges the two least frequent subtrees until a single root remains.
    static private func createTree(with queue: [Node]) -> Node {
        // initialize queue that sorts by decreasing count
        var queue = PriorityQueue(queue: queue)
        // until we have 1 root node, join subtrees of least frequency
        while queue.count > 1 {
            let node1 = queue.dequeue()
            let node2 = queue.dequeue()
            let rootNode = Huffman.createRoot(with: node1, and: node2)
            queue.enqueue(node: rootNode)
        }
        return queue.queue[0]
    }

    /// Joins two subtrees under a fresh parent whose value is the combined count.
    static private func createRoot(with first: Node, and second: Node) -> Node {
        return Node(name: "\(first.name)\(second.name)", value: first.value + second.value, left: first, right: second)
    }
}

/// Array-backed priority queue kept sorted by decreasing `value`;
/// the minimum element sits at the END and is removed by `dequeue()`.
struct PriorityQueue {
    var queue: [Node]

    var count: Int {
        return queue.count
    }

    mutating func enqueue(node: Node) {
        // Insert before the first element that is not larger, keeping the
        // array sorted in decreasing order. Fix: when every element is
        // larger, the new node is the minimum and belongs at the END
        // (the original `?? 0` put it at the front, breaking the ordering
        // and yielding suboptimal trees).
        queue.insert(node, at: queue.firstIndex(where: { $0.value <= node.value }) ?? queue.count)
    }

    mutating func dequeue() -> Node {
        return queue.removeLast()
    }

    init(queue: [Node]) {
        // assumes queue will always be sorted by decreasing count
        self.queue = queue.sorted(by: { $0.value > $1.value })
    }
}

/// A Huffman tree node: leaves carry one character in `name`,
/// internal nodes the concatenation of their descendants' names.
class Node: CustomStringConvertible {
    var description: String {
        return "\(name): \(value)"
    }
    let name: String
    let value: Int
    let left: Node?
    let right: Node?

    init(name: String, value: Int, left: Node? = nil, right: Node? = nil) {
        self.name = name
        self.value = value
        self.left = left
        self.right = right
    }
}