<!DOCTYPE html>
<html>
<head>
  <meta charset="UTF-8"> 
  <title>Am I overpaying for ChatGPT?</title>
  <style>
    body {
      font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
      margin: 40px;
      background-color: #f4f4f4;
      color: #333;
    }
    .container{max-width: 1200px; margin: 0 auto}
    h1, h4 {
      color: #333;
    }
    .upload_block {
      background-color: white;
      padding: 15px;
      border-radius: 8px;
      box-shadow: 0 4px 8px rgba(0,0,0,0.1);
      margin-top: 15px;
    }
    input[type="file"] {
      border: 1px solid #ccc;
      display: inline-block;
      padding: 6px 12px;
      cursor: pointer;
      border-radius: 4px;
      transition: background-color 0.3s ease;
    }
    input[type="file"]:hover {
      background-color: #f0f0f0;
    }
    #loadingIndicator {
      display: none;
      margin-top: 15px;
    }
    #result {
      background-color: white;
      padding: 15px;
      margin-top: 15px;
      border-radius: 8px;
      box-shadow: 0 4px 8px rgba(0,0,0,0.1);
      white-space: pre-wrap;
      display: none;
      position: relative;
    }
    ul {
      list-style-type: none;
      padding: 0;
      margin: 0;
    }
    ul li {
      padding: 8px;
      border-bottom: 1px solid #eee;
    }
    ul li:last-child {
      border-bottom: none;
    }
    a {
      color: #007bff;
      text-decoration: none;
    }
    a:hover {
      text-decoration: underline;
    }
    .color-red {
      color: #ff4136;
    }
    .color-green {
      color: #2ecc40;
    }
    .info-icon{position: absolute;right: 0;margin-right: 20px;color: gray; cursor: pointer}
    .details{display: none;}
    .loading span{
      position: absolute;
      margin-top: 0.25em;
      margin-left: 0.5em;
    }
  </style>
</head>
<body>
  <div class="container">
  <h1>Am I overpaying for ChatGPT?</h1>
  <h4>A ChatGPT Plus subscription is US$20 a month. Via <a href="https://platform.openai.com" target="_blank">the API</a>, GPT-4 Turbo costs US$0.01 per 1,000 input tokens and US$0.03 per 1,000 output tokens. Check if you are overpaying or underpaying!</h4>
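  <!-- Rough break-even sketch, using the same GPT-4 Turbo rates as the script below
       ($10 per 1M input tokens, $30 per 1M output tokens): US$20 buys roughly
       2,000,000 input tokens, or about 667,000 output tokens, or any mix in between,
       e.g. 1,400,000 input + 200,000 output = $14 + $6 = $20. -->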
  <small>To export your history, go to <a href="https://chat.openai.com" target="_blank">ChatGPT</a> > Settings > Data Controls > Export Data. This file is not sent to any server.</small>
  <div class="upload_block">
    <p style="margin-top: 0">Upload your <code>conversations.json</code> file</p>
    <input type="file" id="fileInput" accept=".json">

  </div>
  <div id="loadingIndicator" style="display: none;">
    <p class="loading" style="color:gray"><img width=30 src="loading-buffering.gif" alt="Loading..."><span>Tokenizing messages locally with <code>transformers.js</code>...</span></p>
  </div>
  <pre id="result"></pre>
  <p><small>This tool runs locally on your machine and does not send your history to any server (you can verify it <a href="https://huggingface.co/spaces/multimodalart/am-i-overpaying-for-chatgpt/blob/main/index.html" target="_blank">here</a>). The number of tokens is calculated with <a href="https://huggingface.co/docs/transformers.js/en/index">transformers.js</a>. Image generation (DALL·E 3) and GPT-4V are taken into account.</small></p>
  </div>
  <script type="module">
  // Estimates the GPT-4V token cost of an uploaded image (85 base tokens + 170 per 512px tile).
  function calculateVisionTokens(width, height) {
      // Step 1: cap the longest side at 2048px, preserving the aspect ratio.
      const initialResizeWidth = width > 2048 || height > 2048 ? (width > height ? 2048 : Math.round(2048 * (width / height))) : width;
      const initialResizeHeight = width > 2048 || height > 2048 ? (width > height ? Math.round(2048 / (width / height)) : 2048) : height;

      // Step 2: scale down so the shortest side is at most 768px.
      const furtherResizeWidth = initialResizeWidth > 768 || initialResizeHeight > 768
        ? (initialResizeWidth < initialResizeHeight ? Math.min(768, initialResizeWidth) : Math.round(Math.min(768, initialResizeHeight) * (initialResizeWidth / initialResizeHeight)))
        : initialResizeWidth;

      const furtherResizeHeight = initialResizeWidth > 768 || initialResizeHeight > 768
        ? (initialResizeWidth < initialResizeHeight ? Math.round(Math.min(768, initialResizeWidth) / (initialResizeWidth / initialResizeHeight)) : Math.min(768, initialResizeHeight))
        : initialResizeHeight;

      // Step 3: count how many 512x512 tiles are needed to cover the resized image.
      const verticalTiles = 1 + Math.ceil((furtherResizeHeight - 512) / 512);
      const horizontalTiles = 1 + Math.ceil((furtherResizeWidth - 512) / 512);
      const totalTiles = verticalTiles * horizontalTiles;

      // Step 4: 85 base tokens plus 170 tokens per tile.
      const baseTokens = 85;
      const tileTokens = 170;
      const totalTokens = baseTokens + totalTiles * tileTokens;

      return totalTokens;
  }
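  // Worked example of the formula above: a 1024x1024 upload is already under the 2048px cap,
  // gets its shortest side scaled to 768px (768x768), and is covered by 2x2 tiles of 512px,
  // so calculateVisionTokens(1024, 1024) === 85 + 4 * 170 === 765 tokens.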
  function calculateBranches(mapping) {
      const branches = [];
  
      // Helper function to recursively build branches
      function buildBranch(nodeId, currentBranch) {
          const node = mapping[nodeId];
          if (!node) return;
          const updatedBranch = [...currentBranch, node];
          if (node.children.length === 0) {
              branches.push(updatedBranch);
          } else {
              node.children.forEach(childId => {
                  buildBranch(childId, updatedBranch);
              });
          }
      }
  
      // Find root nodes (nodes without parents)
      const rootNodes = Object.values(mapping).filter(node => !node.parent);
  
      // Build branches starting from each root node
      rootNodes.forEach(rootNode => buildBranch(rootNode.id, []));
  
      return branches;
  }
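  // Minimal sketch of the export's `mapping` shape and what the helper returns
  // (a hypothetical conversation where one message was edited, creating two leaves):
  //   const mapping = {
  //     root: { id: 'root', parent: null,   children: ['a', 'b'] },
  //     a:    { id: 'a',    parent: 'root', children: [] },
  //     b:    { id: 'b',    parent: 'root', children: [] },
  //   };
  //   calculateBranches(mapping); // -> [[root, a], [root, b]]
  // Each edited or regenerated path is then tokenized as its own conversation history.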

  import { AutoTokenizer } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.15.1';
  document.addEventListener("DOMContentLoaded", async (event) => {
    const tokenizer = await AutoTokenizer.from_pretrained('Xenova/gpt-4');
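    // Counting sketch (this assumes the Xenova/gpt-4 tokenizer ships a chat template):
    // with { tokenize: true, return_tensor: false }, apply_chat_template returns a plain
    // array of token ids, so e.g.
    //   tokenizer.apply_chat_template([{ role: 'user', content: 'hi' }], { tokenize: true, return_tensor: false }).length
    // is the number of billable tokens for that message, including the template overhead.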
    const fileInput = document.getElementById('fileInput');
    const resultElement = document.getElementById('result');
    const loadingIndicator = document.getElementById('loadingIndicator');
  
    const sevenMonthsAgo = new Date();
    sevenMonthsAgo.setMonth(sevenMonthsAgo.getMonth() - 7);
    const currentDate = new Date();
    const currentMonthYear = `${currentDate.getMonth() + 1}/${currentDate.getFullYear()}`;
    fileInput.addEventListener('change', function(event) {
      const file = event.target.files[0];
      const reader = new FileReader();
  
      loadingIndicator.style.display = 'block';
  
      reader.onload = async function(e) {
        if (!tokenizer) {
          console.error('Tokenizer is not initialized.');
          loadingIndicator.style.display = 'none'; // Hide if error
          return;
        }
  
        const jsonData = JSON.parse(e.target.result);
        const monthlyData = {};
        let totalSavings = 0;
  
        jsonData.forEach(conversation => {
          const createTime = new Date(conversation.create_time * 1000);
          if (createTime >= sevenMonthsAgo) {
            const monthYear = `${createTime.getMonth() + 1}/${createTime.getFullYear()}`;
  
            if (!monthlyData[monthYear]) {
              monthlyData[monthYear] = {
                gpt4Tokens: 0,
                gpt4InputTokens: 0,
                gpt4VisionTokens: 0,
                gpt4OutputTokens: 0,
                gpt3Tokens: 0,
                gpt3InputTokens: 0,
                gpt3OutputTokens: 0,
                gpt4Cost: 0,
                gpt3Cost: 0,
                images: 0,
                rectangleImages: 0,
                imagesCost: 0,
              };
            }
  
            
            let inputTokens = 0;
            let visionTokens = 0;
            let outputTokens = 0;
            let images = 0;
            let rectangleImages = 0;
            let currentModel = "gpt-4";

            //Calculate branches; this takes into account that users can edit their messages and branch the conversation
            const branches = calculateBranches(conversation.mapping);
            branches.forEach(branch => {
              let conversationHistory = [];
              branch.forEach(node => {
                if (node.message && node.message.content && node.message.content.parts) {
                  const role = node.message.author.role;
                  const content = node.message.content.parts.join(' ').replace(/\[object Object\]/g, '');
                  const currentMessage = {"role": role, "content": content};
                  conversationHistory.push(currentMessage);
                  //If the role is user, count the chat history so far on this branch as input tokens
                  if(role == "user"){
                    const input_ids = tokenizer.apply_chat_template(conversationHistory, { tokenize: true, return_tensor: false });
                    inputTokens += input_ids.length;
                    //If the user uploaded images (GPT-4V), count their vision tokens too
                    if(node.message.content.content_type == "multimodal_text"){
                      for (const image of node.message.content.parts){
                        if(image.width && image.height){
                          visionTokens += calculateVisionTokens(image.width, image.height)
                        }
                      }
                    }
                  //If the role is assistant, count the assistant message as output tokens
                  }else if(role == "assistant"){
                    const output_ids = tokenizer.apply_chat_template([currentMessage], { tokenize: true, return_tensor: false });
                    outputTokens += output_ids.length;
                  //If the DALL·E tool generated images, count them (they are billed per image)
                  }else if(role == "tool" && node.message.author.name == "dalle.text2im"){
                    if(node.message.content.content_type == "multimodal_text"){
                      for (const image of node.message.content.parts){
                        if(image.width && image.height){
                          if(image.width / image.height == 1){
                            images+=1;
                          }else{
                            rectangleImages+= 1;
                          }
                        }
                      }
                    }
                  //If other tools (browsing, code interpreter), ignore
                  }else{
                    return;
                  }
          
                  if (node.message.metadata && node.message.metadata.model_slug) {
                    if (node.message.metadata.model_slug.includes('gpt-4')) {
                      currentModel = "gpt-4";
                    }else{
                      currentModel = "gpt-3";
                    }
                  }
                }
              })
            });
            if (currentModel == 'gpt-4') {
                  // GPT-4 Turbo API pricing: $10 per 1M input tokens, $30 per 1M output tokens
                  const inputTokenCost = inputTokens * (10 / 1000000);
                  const outputTokenCost = outputTokens * (30 / 1000000);
                  monthlyData[monthYear].gpt4InputTokens += inputTokens;
                  monthlyData[monthYear].gpt4OutputTokens += outputTokens;
                  monthlyData[monthYear].gpt4Tokens += inputTokens + outputTokens;
                  monthlyData[monthYear].gpt4Cost += inputTokenCost + outputTokenCost;
            }else{
                  // GPT-3.5 Turbo API pricing: $0.50 per 1M input tokens, $1.50 per 1M output tokens
                  const inputTokenCost = inputTokens * (0.5 / 1000000);
                  const outputTokenCost = outputTokens * (1.5 / 1000000);
                  monthlyData[monthYear].gpt3InputTokens += inputTokens;
                  monthlyData[monthYear].gpt3OutputTokens += outputTokens;
                  monthlyData[monthYear].gpt3Tokens += inputTokens + outputTokens;
                  monthlyData[monthYear].gpt3Cost += inputTokenCost + outputTokenCost;
            }
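            // Example with the rates above: a conversation with 50,000 GPT-4 input tokens and
            // 5,000 output tokens adds 50,000 * $10/1M + 5,000 * $30/1M = $0.50 + $0.15 = $0.65.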
            // DALL·E 3 pricing: $0.04 per square (1024x1024) image, $0.08 per rectangular one;
            // GPT-4V input is billed at the same $10 / 1M token rate as GPT-4 Turbo input.
            const imagesCost = images * 0.04;
            const rectanglesCost = rectangleImages * 0.08;
            const visionCost = visionTokens * 0.00001;
            monthlyData[monthYear].gpt4VisionTokens += visionTokens;
            monthlyData[monthYear].gpt4Cost += visionCost;
            monthlyData[monthYear].images += images;
            monthlyData[monthYear].rectangleImages += rectangleImages;
            monthlyData[monthYear].imagesCost += imagesCost + rectanglesCost;
          }
        });
  
        let resultText = '<div class="info-icon">more details ⓘ</div><ul>';
        
        Object.entries(monthlyData).forEach(([monthYear, data]) => {
          let totalCost = 0;
          if(data.gpt4Cost != 0 || data.imagesCost != 0){
            totalCost = data.gpt4Cost + data.gpt3Cost + data.imagesCost;
          }
          
          if(currentMonthYear == monthYear){
            resultText += `<li>${monthYear}: <b>Total API cost: $${totalCost.toFixed(2)}</b> (<i>current month</i>)`;
          }else if(totalCost > 0){
            totalSavings += 20 - totalCost; // Calculate total savings or extra cost
            resultText += `<li>${monthYear}: <b>Total API cost: $${totalCost.toFixed(2)}</b>`;
            resultText += (totalCost <= 20) ?
              `. <span class="color-red">You overpaid ChatGPT by <b>$${(20 - totalCost).toFixed(2)}</b> this month.</span>` :
              `. <span class="color-green">You underpaid ChatGPT by <b>$${(totalCost - 20).toFixed(2)}</b> this month.</span>`;
          }else{
            resultText += `<li>${monthYear}: <small>we assume no subscription this month due to no GPT-4 usage</small>`;
          }
          resultText += `<br><span class="details" style="color:gray">gpt4InputTokens: ${data.gpt4InputTokens}, gpt4OutputTokens: ${data.gpt4OutputTokens}, gpt4VisionTokens: ${data.gpt4VisionTokens}, gpt3InputTokens: ${data.gpt3InputTokens}, gpt3OutputTokens: ${data.gpt3OutputTokens}, DALL·E 3 Square Images: ${data.images}, DALL·E 3 Rectangular Images: ${data.rectangleImages}</span>`;
          resultText += '</li>';
          
        });
        resultText += '</ul>';
        
        const summaryText = totalSavings >= 0 ?
          `<span class="color-red">You are overpaying ChatGPT</span>. In the last 6 months, you could have saved US$${totalSavings.toFixed(2)} by using the API.` :
          `<span class="color-green">You are underpaying ChatGPT</span>. In the last 6 months, you saved US$${Math.abs(totalSavings).toFixed(2)} by being subscribed to ChaGPT Plus.`;
  
        const extraInfo = totalSavings > 0 ? `<div>You can use the <a href="https://platform.openai.com" target="_blank">GPT-4 Turbo API</a> in a ChatGPT-like UI with open-source tools like <a href="https://github.com/huggingface/chat-ui/issues/253" target="_blank">🤗 ChatUI</a>, <a href="https://github.com/ztjhz/BetterChatGPT" target="_blank">BetterChatGPT</a>, or <a href="https://github.com/deiucanta/chatpad" target="_blank">ChatPad</a>.<br><br>You may also use state-of-the-art open-source models for free with <a href="https://huggingface.co/chat/" target="_blank">HuggingChat</a>.</div>` : ``;
        resultElement.innerHTML = resultText + `<h2>${summaryText}</h2> ${extraInfo}`;
        resultElement.style.display = 'block'
        loadingIndicator.style.display = 'none';
      };
  
      reader.readAsText(file);
  });

    document.body.addEventListener('click', function(e) {
      if(e.target.classList.contains('info-icon')) {
        if (e.target.textContent.includes('more details')) {
          e.target.textContent = 'less details ⓘ';
          // Show .details elements
          document.querySelectorAll('.details').forEach(details => {
            details.style.display = 'block';
          });
        } else {
          e.target.textContent = 'more details ⓘ';
          // Hide .details elements
          document.querySelectorAll('.details').forEach(details => {
            details.style.display = 'none';
          });
        }
      }
    });
  });
</script>
</body>
</html>