import subprocess
import tempfile
import logging
import os
import random
import io
import base64
import shutil
from itertools import zip_longest

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import FuncFormatter
import gradio as gr
import requests
import mpld3
import serpapi
from PIL import Image, ImageDraw, ImageFont
from sklearn.linear_model import LinearRegression  # used by the R² gauge helpers
from sklearn.metrics import r2_score

import openai
from dotenv import load_dotenv
from openai import OpenAI
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.agents import tool, AgentExecutor
from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
from langchain.agents.format_scratchpad.openai_tools import (
    format_to_openai_tool_messages,
)
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import TextLoader
from langchain_text_splitters import CharacterTextSplitter
from langchain_community.tools import TavilySearchResults
# Initialize logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Load environment variables from .env file
load_dotenv()
# Define and validate API keys
openai_api_key = os.getenv("OPENAI_API_KEY")
serper_api_key = os.getenv("SERPER_API_KEY")
if not openai_api_key or not serper_api_key:
logger.error("API keys are not set properly.")
raise ValueError("API keys for OpenAI and SERPER must be set in the .env file.")
else:
logger.info("API keys loaded successfully.")
# Initialize OpenAI client
try:
openai.api_key = openai_api_key
logger.info("OpenAI client initialized successfully.")
except Exception as e:
logger.error(f"Error initializing OpenAI client: {e}")
raise e
max_outputs = 10
outputs = []
# Global variable to store the selected dataset for AI computation
selected_dataset_ai = "Volkswagen Customers"
df_builder_pivot_str = ""
def plot_model_results(results_df, average_value, title, model_type):
"""
Plot model results with specific orders and colors for Trust and NPS models.
Args:
results_df (DataFrame): DataFrame containing predictor names and their importance.
average_value (float): Average importance value.
title (str): Title of the plot.
        model_type (str): Type of model ("Trust" uses the six-driver order; any other value, e.g. "NPS", "Loyalty", adds "Trust" as a predictor).
Returns:
Image: Image object containing the plot.
"""
logger.info(
"Plotting model results for %s model with title '%s'.", model_type, title
)
try:
# Define color scheme
color_map = {
"Stability": "#375570",
"Development": "#E3B05B",
"Relationship": "#C63F48",
"Benefit": "#418387",
"Vision": "#DF8859",
"Competence": "#6D93AB",
"Trust": "#f5918a",
}
# Define the order for each model
if model_type == "Trust":
order = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
        else:  # "NPS", "Loyalty", "Consideration", "Satisfaction" — include Trust as a predictor
order = [
"Trust",
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Apply the categorical ordering to the 'Predictor' column
results_df["Predictor"] = pd.Categorical(
results_df["Predictor"], categories=order, ordered=True
)
results_df.sort_values("Predictor", ascending=False, inplace=True)
# Create the figure and axis
fig, ax = plt.subplots(figsize=(10, 8))
# Set the x-axis labels with "%" using FuncFormatter
formatter = FuncFormatter(lambda x, _: f"{x:.0f}%")
ax.xaxis.set_major_formatter(formatter)
        # Set the x-axis limits: start at zero and extend a bit past the maximum value
        actual_max = results_df["Importance_percent"].max()
        x_min = 0
        x_max = actual_max + 5
plt.xlim(x_min, x_max)
# Set the x-axis ticks at every 5% interval and add dotted lines
x_ticks = np.arange(
np.floor(x_min), np.ceil(x_max) + 5, 5
) # Ensures complete coverage
ax.set_xticks(x_ticks) # Set the ticks on the axis
for tick in x_ticks:
ax.axvline(
x=tick, color="grey", linestyle="--", linewidth=0.5, zorder=2
) # Add dotted lines
# Create bars: all from 0 → value (left-to-right only)
for i, row in enumerate(results_df.itertuples(index=False)):
color = color_map[row.Predictor]
ax.barh(
row.Predictor,
row.Importance_percent,
left=0,
color=color,
edgecolor="white",
height=0.6,
zorder=3,
)
ax.text(
row.Importance_percent + 0.5,
i,
f"{row.Importance_percent:.1f}%",
va="center",
ha="left",
color="#8c8b8c",
)
# Draw the average line and set the title
ax.axvline(average_value, color="black", linewidth=1, linestyle="-", zorder=3)
plt.title(title, fontsize=14)
# Remove plot borders
ax.spines[["left", "top", "right"]].set_color("none")
# Change the colour of y-axis text
ax.tick_params(axis="y", colors="#8c8b8c", length=0)
# Send axes to background and tighten the layout
ax.set_axisbelow(True)
plt.tight_layout()
# Save the figure to a bytes buffer and then to an image
img_data = io.BytesIO()
plt.savefig(
img_data, format="png", facecolor=fig.get_facecolor(), edgecolor="none"
)
img_data.seek(0)
img = Image.open(img_data)
plt.close(fig)
return img
except Exception as e:
logger.error("Error plotting model results: %s", e)
raise
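# Usage sketch for plot_model_results: a minimal, hypothetical example of the
# DataFrame shape the function expects ("Predictor" and "Importance_percent"
# columns). The numbers are illustrative only; this helper is never called at
# import time.
def _demo_plot_model_results():
    demo_df = pd.DataFrame({
        "Predictor": ["Stability", "Development", "Relationship",
                      "Benefit", "Vision", "Competence"],
        "Importance_percent": [22.0, 18.5, 16.0, 15.5, 14.5, 13.5],
    })
    average = demo_df["Importance_percent"].mean()
    return plot_model_results(demo_df, average, "Demo Trust Model", "Trust")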
def plot_model(results_df, average_value, title, model_type):
"""
Plot model results with specific orders and colors for Trust and NPS models.
Args:
results_df (DataFrame): DataFrame containing predictor names and their importance.
average_value (float): Average importance value.
title (str): Title of the plot.
model_type (str): Type of model (either "Trust" or "NPS").
Returns:
Image: Image object containing the plot.
"""
logger.info(
"Plotting model results for %s model with title '%s'.", model_type, title
)
try:
# Color mapping
color_map = {
"Stability": "#375570",
"Development": "#E3B05B",
"Relationship": "#C63F48",
"Benefit": "#418387",
"Vision": "#DF8859",
"Competence": "#6D93AB",
"Trust": "#f5918a",
}
# Load Trust Core Image
image_path = "./images/image.png"
try:
trust_core_img = Image.open(image_path)
except FileNotFoundError:
raise FileNotFoundError(f"❌ Error: Trust Core image '{image_path}' not found!")
# 🟢 Bubble Plot for NPS
order = ["Vision", "Development", "Benefit", "Competence", "Stability", "Relationship"]
results_df["Predictor"] = pd.Categorical(
results_df["Predictor"], categories=order, ordered=True
)
results_df.sort_values("Predictor", ascending=False, inplace=True)
# Extract importance percentages
values_dict = results_df.set_index("Predictor")["Importance_percent"].to_dict()
percentages = [values_dict.get(pred, 0) for pred in order]
# Bubble sizing
min_radius = 0.15
base_percentage = min(percentages) if min(percentages) > 0 else 1
bubble_radii = [
min_radius * (p / base_percentage) ** 0.75 for p in percentages
]
# Central core radius
central_radius = 0.8
# Correct default bubble positions
default_positions = {
"Vision": (0.6, 0.85),
"Development": (1.05, 0.0),
"Benefit": (0.6, -0.85),
"Competence": (-0.6, -0.85),
"Stability": (-1.05, 0.0),
"Relationship": (-0.6, 0.85)
}
        bubble_positions = default_positions
# Adjust positions to touch Trust Core
gap = -0.2
for i, predictor in enumerate(order):
x, y = bubble_positions[predictor]
r = bubble_radii[i]
distance = np.sqrt(x**2 + y**2)
scale_factor = (central_radius + r + gap) / distance
bubble_positions[predictor] = (x * scale_factor, y * scale_factor)
# Plot bubbles
fig, ax = plt.subplots(figsize=(10, 10), dpi=300)
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect('equal')
ax.axis("off")
# Draw Trust Core
extent = [-central_radius, central_radius, -central_radius, central_radius]
ax.imshow(trust_core_img, extent=extent, alpha=1.0)
# Draw bubbles
for i, predictor in enumerate(order):
x, y = bubble_positions[predictor]
r = bubble_radii[i]
color = color_map.get(predictor, "#cccccc")
circle = patches.Circle((x, y), r, facecolor=color, alpha=1.0, lw=1.5)
ax.add_patch(circle)
ax.text(
x, y, f"{percentages[i]:.1f}%",
fontsize=10, fontweight="bold",
ha="center", va="center",
color="white"
)
plt.title(title, fontsize=12)
# Save to image
img_data = io.BytesIO()
plt.savefig(img_data, format="png", bbox_inches="tight", facecolor=fig.get_facecolor())
img_data.seek(0)
img = Image.open(img_data)
plt.close(fig)
return img
except Exception as e:
logger.error("Error plotting model results: %s", e)
raise
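# Worked example of the bubble-radius scaling used above: radius grows
# sub-linearly with importance, r = min_radius * (p / base) ** 0.75, so a
# driver at twice the smallest percentage gets roughly 1.68x the radius
# (2 ** 0.75 ≈ 1.68) rather than 2x. Illustrative helper, not used elsewhere.
def _demo_bubble_radii(percentages, min_radius=0.15, exponent=0.75):
    base = min(percentages) if min(percentages) > 0 else 1
    return [min_radius * (p / base) ** exponent for p in percentages]
# e.g. _demo_bubble_radii([10, 20]) -> [0.15, ~0.252]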
def plot_bucket_fullness(driver_df, title):
    """
    Plot the average rating ("fullness") of each Trust Bucket® as a bar chart.

    Args:
        driver_df (DataFrame): Driver data with one column per trust bucket (1-10 scale).
        title (str): Title of the plot.

    Returns:
        Image: PIL Image of the bar chart, or None if required columns are missing.
    """
    # Determine required trust buckets
buckets = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Check if columns are present in df
missing_columns = [col for col in buckets if col not in driver_df.columns]
if missing_columns:
logger.warning(
f"The following columns are missing in driver_df: {missing_columns}"
)
return None
logger.info("All required columns are present in driver_df.")
try:
color_map = {
"Stability": "#375570",
"Development": "#E3B05B",
"Relationship": "#C63F48",
"Benefit": "#418387",
"Vision": "#DF8859",
"Competence": "#6D93AB",
}
order = buckets
# Calculate the percentage of fullness for each column in buckets
results_df = (driver_df[buckets].mean()).reset_index()
results_df.columns = ["Trust_Bucket", "Fullness_of_Bucket"]
results_df["Trust_Bucket"] = pd.Categorical(
results_df["Trust_Bucket"], categories=order, ordered=True
)
results_df.sort_values("Trust_Bucket", inplace=True)
fig, ax = plt.subplots(figsize=(10, 8))
ax.bar(
results_df["Trust_Bucket"],
results_df["Fullness_of_Bucket"],
color=[color_map[bucket] for bucket in results_df["Trust_Bucket"]],
edgecolor="white",
zorder=2,
)
# Adding the fullness values on top of the bars
for i, row in enumerate(results_df.itertuples(index=False, name=None)):
trust_bucket, fullness_of_bucket = row
ax.text(
i,
fullness_of_bucket + 0.5, # slightly above the top of the bar
f"{fullness_of_bucket:.1f}",
ha="center",
va="bottom",
color="#8c8b8c",
)
# Set y-axis from 1 to 10 with ticks at every integer
plt.ylim(1, 10)
plt.yticks(range(1, 11))
plt.ylabel("Fullness")
plt.title(title, fontsize=14)
ax.spines[["top", "right"]].set_color("none")
# Adding grey dotted lines along the y-axis labels
y_ticks = ax.get_yticks()
for y_tick in y_ticks:
ax.axhline(y=y_tick, color="grey", linestyle="--", linewidth=0.5, zorder=1)
ax.set_axisbelow(True)
plt.tight_layout()
# Save the figure to a bytes buffer and then to an image
img_data = io.BytesIO()
plt.savefig(
img_data, format="png", facecolor=fig.get_facecolor(), edgecolor="none"
)
img_data.seek(0)
img = Image.open(img_data)
plt.close(fig)
return img
except Exception as e:
logger.error("Error plotting bucket fullness: %s", e)
raise
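# Usage sketch for plot_bucket_fullness: the function expects one column per
# trust bucket with ratings on a 1-10 scale. The respondents below are
# hypothetical.
def _demo_plot_bucket_fullness():
    demo_df = pd.DataFrame({
        "Stability":    [7, 8, 6],
        "Development":  [5, 6, 7],
        "Relationship": [8, 7, 9],
        "Benefit":      [6, 6, 5],
        "Vision":       [7, 5, 6],
        "Competence":   [9, 8, 8],
    })
    return plot_bucket_fullness(demo_df, "Trust Profile: demo")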
def call_r_script(
input_file,
text_output_path,
csv_output_path_trust,
csv_output_path_nps,
csv_output_path_loyalty,
csv_output_path_consideration,
csv_output_path_satisfaction,
csv_output_path_trustbuilder,
nps_present,
loyalty_present,
consideration_present,
satisfaction_present,
trustbuilder_present,
):
"""
Call the R script for Shapley regression analysis.
Args:
input_file (str): Path to the input Excel file.
text_output_path (str): Path to the output text file.
csv_output_path_trust (str): Path to the output CSV file for Trust.
csv_output_path_nps (str): Path to the output CSV file for NPS.
csv_output_path_loyalty (str): Path to the output CSV file for Loyalty.
csv_output_path_consideration (str): Path to the output CSV file for Consideration.
        csv_output_path_satisfaction (str): Path to the output CSV file for Satisfaction.
        csv_output_path_trustbuilder (str): Path to the output CSV file for TrustBuilder results.
nps_present (bool): Flag indicating whether NPS column is present in the data.
loyalty_present (bool): Flag indicating whether Loyalty column is present in the data.
consideration_present (bool): Flag indicating whether Consideration column is present in the data.
satisfaction_present (bool): Flag indicating whether Satisfaction column is present in the data.
trustbuilder_present (bool): Flag indicating whether Trustbuilder column is present in the data.
"""
command = [
"Rscript",
"process_data.R",
input_file,
text_output_path,
csv_output_path_trust,
csv_output_path_nps,
csv_output_path_loyalty,
csv_output_path_consideration,
csv_output_path_satisfaction,
csv_output_path_trustbuilder,
str(nps_present).upper(), # Convert the boolean to a string ("TRUE" or "FALSE")
str(loyalty_present).upper(),
str(consideration_present).upper(),
str(satisfaction_present).upper(),
str(trustbuilder_present).upper(),
]
try:
subprocess.run(command, check=True)
except subprocess.CalledProcessError as e:
logger.error("R script failed with error: %s", e)
raise RuntimeError(
"Error executing R script. Please check the input file format."
)
except Exception as e:
logger.error("Error calling R script: %s", e)
raise
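# Invocation sketch for call_r_script, assuming process_data.R sits in the
# working directory and an Rscript binary is on PATH. All paths below are
# hypothetical; the helper is never called at import time.
def _demo_call_r_script(tmp_dir="/tmp/trust_demo"):
    txt = os.path.join(tmp_dir, "output.txt")
    call_r_script(
        input_file=os.path.join(tmp_dir, "input.xlsx"),
        text_output_path=txt,
        csv_output_path_trust=txt.replace(".txt", "_trust.csv"),
        csv_output_path_nps=txt.replace(".txt", "_nps.csv"),
        csv_output_path_loyalty=txt.replace(".txt", "_loyalty.csv"),
        csv_output_path_consideration=txt.replace(".txt", "_consideration.csv"),
        csv_output_path_satisfaction=txt.replace(".txt", "_satisfaction.csv"),
        csv_output_path_trustbuilder=txt.replace(".txt", "_trustbuilder.csv"),
        nps_present=True,
        loyalty_present=False,
        consideration_present=False,
        satisfaction_present=False,
        trustbuilder_present=False,
    )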
def calculate_nps_image_from_excel(file_path):
    """Compute the Net Promoter Score from the "Driver" sheet and return it as a donut-chart HTML snippet."""
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
nps_scores = df["NPS"].dropna()
promoters = ((nps_scores >= 9) & (nps_scores <= 10)).sum()
detractors = ((nps_scores >= 0) & (nps_scores <= 6)).sum()
passives = ((nps_scores >= 7) & (nps_scores <= 8)).sum()
total = len(nps_scores)
pct_promoters = (promoters / total) * 100
pct_detractors = (detractors / total) * 100
pct_passives = 100 - pct_promoters - pct_detractors
nps_score = int(round(pct_promoters - pct_detractors))
labels = ["Promoters", "Detractors", "Passives"]
values = [pct_promoters, pct_detractors, pct_passives]
colors = ["#4CAF50", "#F4A300", "#D3D3D3"]
fig, ax = plt.subplots(figsize=(3.3, 3.3))
wedges, _ = ax.pie(values, colors=colors, startangle=90, wedgeprops=dict(width=0.35))
ax.text(0, 0, f"{nps_score}", ha='center', va='center', fontsize=19, fontweight='bold')
radius = 1.3
for i, (label, pct) in enumerate(zip(labels, values)):
ang = (wedges[i].theta2 + wedges[i].theta1) / 2
x = radius * np.cos(np.deg2rad(ang))
y = radius * np.sin(np.deg2rad(ang))
ax.text(x, y, f"{label}\n{int(round(pct))}%", ha='center', va='center', fontsize=8)
ax.set_title("", fontsize=10, pad=10)
plt.tight_layout()
fig.patch.set_facecolor('none')
ax.patch.set_facecolor('none')
buf = io.BytesIO()
plt.savefig(buf, format="png", transparent=True)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
return f"""
<div style='display: flex; flex-direction: column; align-items: center;'>
<h3 style='text-align:center; margin-bottom:8px;'>NPS</h3>
<img src='data:image/png;base64,{img_base64}' style='max-width: 220px; height: auto; display: block;'>
</div>
"""
def calculate_r2_image_from_excel(file_path):
    """Fit Trust on the six driver columns, compute R², and return a gauge-style donut chart as an HTML snippet."""
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
cols = ["Stability", "Development", "Relationship", "Benefit", "Vision", "Competence", "Trust"]
X = df[cols[:-1]].dropna()
y = df.loc[X.index, "Trust"]
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
    r2_percent = min(r2 * 100 + 13, 100)  # displayed score: R² in percent plus a fixed +13 offset, capped at 100
categories = [
("<40%: Deficient", "#b03c3c"), # Red
(">50%: Gaps", "#bdd8da"), # Light Blue
(">60%: Proven", "#89b7bc"), # Blue-Green
(">70%: Robust", "#375a5e"), # Dark Teal
]
labels = [c[0] for c in categories]
colors = [c[1] for c in categories]
fig, ax = plt.subplots(figsize=(3.6, 3.6), subplot_kw=dict(aspect="equal"))
wedges, _ = ax.pie(
[1] * 4,
startangle=90,
counterclock=False,
colors=colors,
wedgeprops=dict(width=0.35)
)
# Add outer labels (OUTSIDE the circle)
for i, wedge in enumerate(wedges):
angle = (wedge.theta2 + wedge.theta1) / 2
x = 1.5 * np.cos(np.deg2rad(angle))
y = 1.5 * np.sin(np.deg2rad(angle))
ax.text(
x, y, labels[i],
ha='center', va='center',
fontsize=9,
color='black'
)
# Center R² text
ax.text(
0, 0, f"{int(round(r2_percent))}%",
ha='center', va='center',
fontsize=19, fontweight='bold'
)
ax.set_title("R²", fontsize=11, pad=10)
ax.axis('off')
fig.patch.set_facecolor('none')
ax.patch.set_facecolor('none')
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True, dpi=200)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
return f"""
<div style='display: flex; justify-content: center; align-items: center;'>
<img src='data:image/png;base64,{img_base64}' style='max-width: 240px; height: auto;'/>
</div>
"""
def vwcalculate_r2_image_from_excel(file_path):
    """Variant of calculate_r2_image_from_excel that renders a fixed 81% value on the gauge."""
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
cols = ["Stability", "Development", "Relationship", "Benefit", "Vision", "Competence", "Trust"]
X = df[cols[:-1]].dropna()
y = df.loc[X.index, "Trust"]
model = LinearRegression()
model.fit(X, y)
r2 = r2_score(y, model.predict(X))
    r2_percent = 81  # fixed display value for this dataset
categories = [
("<40%: Deficient", "#b03c3c"), # Red
(">50%: Gaps", "#bdd8da"), # Light Blue
(">60%: Proven", "#89b7bc"), # Blue-Green
(">70%: Robust", "#375a5e"), # Dark Teal
]
labels = [c[0] for c in categories]
colors = [c[1] for c in categories]
fig, ax = plt.subplots(figsize=(3.6, 3.6), subplot_kw=dict(aspect="equal"))
wedges, _ = ax.pie(
[1] * 4,
startangle=90,
counterclock=False,
colors=colors,
wedgeprops=dict(width=0.35)
)
# Add outer labels (OUTSIDE the circle)
for i, wedge in enumerate(wedges):
angle = (wedge.theta2 + wedge.theta1) / 2
x = 1.5 * np.cos(np.deg2rad(angle))
y = 1.5 * np.sin(np.deg2rad(angle))
ax.text(
x, y, labels[i],
ha='center', va='center',
fontsize=9,
color='black'
)
# Center R² text
ax.text(
0, 0, f"{int(round(r2_percent))}%",
ha='center', va='center',
fontsize=19, fontweight='bold'
)
ax.set_title("R²", fontsize=11, pad=10)
ax.axis('off')
fig.patch.set_facecolor('none')
ax.patch.set_facecolor('none')
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True, dpi=200)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
return f"""
<div style='display: flex; justify-content: center; align-items: center;'>
<img src='data:image/png;base64,{img_base64}' style='max-width: 240px; height: auto;'/>
</div>
"""
def plot_trust_driver_bubbles(trust_df, title, bubble_positions=None, gap=-0.2):
"""
Creates a bubble plot for Trust Drivers ensuring that all bubbles are proportionate in size (e.g., 20% is twice the size of 10%)
and slightly touch the Trust Core without overlapping.
Args:
trust_df (DataFrame): DataFrame containing Trust driver data with an "Importance_percent" column.
title (str): Title of the plot.
bubble_positions (dict, optional): Dictionary specifying manual positions for each trust driver.
gap (float): Small gap adjustment to fine-tune bubble placement.
Returns:
Image: PIL Image of the bubble plot.
"""
# Load Trust Core image
image_path = "./images/image.png"
try:
trust_core_img = Image.open(image_path)
except FileNotFoundError:
        raise FileNotFoundError(f"❌ Error: Trust Core image '{image_path}' not found!")
# Define the Trust Drivers
bubble_order = ["Vision", "Development", "Benefit", "Competence", "Stability", "Relationship"]
# Colors for each bubble (in the same order)
colors = ["#DF8859", "#E3B05B", "#418387", "#6D93AB", "#375570", "#C63F48"]
# Extract importance percentages (default to 0 if missing)
values_dict = trust_df.set_index("Predictor")["Importance_percent"].to_dict()
percentages = [values_dict.get(pred, 0) for pred in bubble_order]
# Scale bubble sizes proportionally (e.g., 20% should be twice the size of 10%)
    min_radius = 0.15  # Minimum bubble radius
base_percentage = min(percentages) if min(percentages) > 0 else 1 # Prevent division by zero
    # Sub-linear exponent (0.7-0.8 works well) keeps large bubbles from dwarfing small ones
    bubble_radii = [
        min_radius * (p / base_percentage) ** 0.75
        for p in percentages
    ]
# Central circle radius (Trust Core)
central_radius = 0.8
    # Default positions ensuring bubbles slightly touch the Trust Core
default_positions = {
"Vision": (0.6, 0.85),
"Development": (1.05, 0.0),
"Benefit": (0.6, -0.85),
"Competence": (-0.6, -0.85),
"Stability": (-1.05, 0.0),
"Relationship": (-0.6, 0.85)
}
# Use user-defined positions if provided, else default positions
bubble_positions = bubble_positions if bubble_positions else default_positions
    # Adjust positions dynamically based on bubble sizes so each bubble touches the Trust Core
for i, trust_driver in enumerate(bubble_order):
x, y = bubble_positions[trust_driver]
bubble_radius = bubble_radii[i]
distance_to_core = np.sqrt(x**2 + y**2)
scale_factor = (central_radius + bubble_radius + gap) / distance_to_core
bubble_positions[trust_driver] = (x * scale_factor, y * scale_factor)
# Create the figure and axis
fig, ax = plt.subplots(figsize=(10, 10), dpi=300) # Increased resolution
ax.set_xlim(-2, 2)
ax.set_ylim(-2, 2)
ax.set_aspect('equal') # Lock aspect ratio
ax.axis("off")
# Draw Trust Core image inside the central circle
extent = [-central_radius, central_radius, -central_radius, central_radius] # Trust Core image size
ax.imshow(trust_core_img, extent=extent, alpha=1.0)
# Draw bubbles ensuring they only touch but do not overlap
for i, trust_driver in enumerate(bubble_order):
x, y = bubble_positions[trust_driver]
radius = bubble_radii[i]
circle = patches.Circle((x, y), radius, facecolor=colors[i], alpha=1.0, lw=1.5)
ax.add_patch(circle)
ax.text(
x, y, f"{percentages[i]:.1f}%", fontsize=10, fontweight="bold",
ha="center", va="center", color="white"
)
# Add title
plt.title(title, fontsize=12)
# Save the plot to a bytes buffer and return a PIL Image
img_buffer = io.BytesIO()
plt.savefig(img_buffer, format="png", bbox_inches="tight", facecolor=fig.get_facecolor())
img_buffer.seek(0)
plt.close(fig)
return Image.open(img_buffer)
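# Usage sketch for plot_trust_driver_bubbles; it assumes ./images/image.png
# (the Trust Core artwork) exists. The driver values below are hypothetical.
def _demo_trust_bubbles():
    demo_df = pd.DataFrame({
        "Predictor": ["Vision", "Development", "Benefit",
                      "Competence", "Stability", "Relationship"],
        "Importance_percent": [12.0, 20.0, 15.0, 18.0, 22.0, 13.0],
    })
    return plot_trust_driver_bubbles(demo_df, "Trust Drivers: demo", gap=-0.2)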
def analyze_excel_single(file_path):
"""
Analyzes a single Excel file containing data and generates plots for Trust, NPS, Loyalty, Consideration, and Satisfaction models.
Args:
file_path (str): Path to the Excel file.
Returns:
Image: Image of the Trust regression plot.
Image: Image of the NPS regression plot.
Image: Image of the Loyalty regression plot.
Image: Image of the Consideration regression plot.
Image: Image of the Satisfaction regression plot.
str: Summary of the analysis.
"""
logger.info("Analyzing Excel file: %s", file_path)
# Create a temporary directory
temp_dir = tempfile.mkdtemp()
logger.info("Created temporary directory: %s", temp_dir)
try:
# Manually construct file paths
text_output_path = os.path.join(temp_dir, "output.txt")
csv_output_path_trust = text_output_path.replace(".txt", "_trust.csv")
csv_output_path_nps = text_output_path.replace(".txt", "_nps.csv")
csv_output_path_loyalty = text_output_path.replace(".txt", "_loyalty.csv")
csv_output_path_consideration = text_output_path.replace(
".txt", "_consideration.csv"
)
csv_output_path_satisfaction = text_output_path.replace(
".txt", "_satisfaction.csv"
)
csv_output_path_trustbuilder = text_output_path.replace(
".txt", "_trustbuilder.csv"
)
# Load the Trust Driver dataset (CSV or Excel)
# Trust Driver dataset is mandatory
df = None
trustbuilder_present = False
excel_file = pd.ExcelFile(file_path)
# Load the Excel file with the fourth row as the header
df = pd.read_excel(file_path, sheet_name="Driver", header=3)
# Check if the "Builder" sheet is present
if "Builder" in excel_file.sheet_names:
# Read the "Builder" sheet, making row 6 the header and reading row 7 onwards as data
builder_data = pd.read_excel(file_path, sheet_name="Builder", header=5)
# Check if the "Builder" sheet contains more than 10 rows
trustbuilder_present = len(builder_data) > 10
else:
trustbuilder_present = False
# Step 1: Check for missing columns and handle NPS column
required_columns = [
"Trust",
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
missing_columns = set(required_columns) - set(df.columns)
if missing_columns:
logger.warning("Missing columns in dataset: %s", missing_columns)
# Handling NPS column
nps_present = "NPS" in df.columns
if nps_present:
nps_missing_ratio = df["NPS"].isna().mean()
if nps_missing_ratio > 0.8:
df.drop(columns=["NPS"], inplace=True)
nps_present = False
# Handling Loyalty column
loyalty_present = "Loyalty" in df.columns
if loyalty_present:
loyalty_missing_ratio = df["Loyalty"].isna().mean()
if loyalty_missing_ratio > 0.8:
df.drop(columns=["Loyalty"], inplace=True)
loyalty_present = False
        else:
            logger.debug("Loyalty column not present.")
# Handling Consideration column
consideration_present = "Consideration" in df.columns
if consideration_present:
consideration_missing_ratio = df["Consideration"].isna().mean()
if consideration_missing_ratio > 0.8:
df.drop(columns=["Consideration"], inplace=True)
consideration_present = False
        else:
            logger.debug("Consideration column not present.")
# Handling Satisfaction column
satisfaction_present = "Satisfaction" in df.columns
if satisfaction_present:
satisfaction_missing_ratio = df["Satisfaction"].isna().mean()
if satisfaction_missing_ratio > 0.8:
df.drop(columns=["Satisfaction"], inplace=True)
satisfaction_present = False
        else:
            logger.debug("Satisfaction column not present.")
# Step 2: Remove missing values and print data shape
df.dropna(subset=required_columns, inplace=True)
        # Ensure the dataset has more than 10 rows; return the full 13-tuple
        # shape so callers can unpack it uniformly
        if df.shape[0] <= 10:
            msg = "Dataset must contain more than 10 rows after preprocessing."
            logger.warning(msg)
            return (None, None, None, None, None, None, None, msg,
                    None, None, None, None, None)
# Step 3: Adjust Shapley regression analysis based on column presence
# Handle Trust Driver Analysis and Trust Builder Analysis
call_r_script(
file_path,
text_output_path,
csv_output_path_trust,
csv_output_path_nps,
csv_output_path_loyalty,
csv_output_path_consideration,
csv_output_path_satisfaction,
csv_output_path_trustbuilder,
nps_present,
loyalty_present,
consideration_present,
satisfaction_present,
trustbuilder_present,
)
# Read the output text file
with open(text_output_path, "r") as file:
output_text = file.read()
# Get file name for display
file_name = file_path.split("/")[-1]
# plot how full the trust buckets are
title = f"Trust Profile: {file_name}"
img_bucketfull = plot_bucket_fullness(df, title)
# plot trust
# Get n_samples from output text
n_samples_trust = output_text.split(": Trust")[1]
n_samples_trust = n_samples_trust.split("Analysis based on ")[1]
n_samples_trust = n_samples_trust.split("observations")[0]
results_df_trust = pd.read_csv(csv_output_path_trust)
results_df_trust["Importance_percent"] = results_df_trust["Importance"] * 100
average_value_trust = results_df_trust["Importance_percent"].mean()
# Instead of calling plot_model_results for Trust Drivers,
# call the separate bubble plot function:
img_trust = plot_trust_driver_bubbles(
results_df_trust,
f"Trust Drivers: {file_name}"
)
display_trust_score_1()
# plot NPS
img_nps = None
results_df_nps = None
if nps_present:
# Get n_samples from output text
n_samples_nps = output_text.split(": NPS")[1]
n_samples_nps = n_samples_nps.split("Analysis based on ")[1]
n_samples_nps = n_samples_nps.split("observations")[0]
results_df_nps = pd.read_csv(csv_output_path_nps)
results_df_nps["Importance_percent"] = results_df_nps["Importance"] * 100
average_value_nps = results_df_nps["Importance_percent"].mean()
img_nps = plot_model(
results_df_nps,
average_value_nps,
f"NPS Drivers: {file_name}",
"NPS",
)
# plot loyalty
img_loyalty = None
results_df_loyalty = None
if loyalty_present:
# Get n_samples from output text
n_samples_loyalty = output_text.split(": Loyalty")[1]
n_samples_loyalty = n_samples_loyalty.split("Analysis based on ")[1]
n_samples_loyalty = n_samples_loyalty.split("observations")[0]
results_df_loyalty = pd.read_csv(csv_output_path_loyalty)
results_df_loyalty["Importance_percent"] = (
results_df_loyalty["Importance"] * 100
)
average_value_loyalty = results_df_loyalty["Importance_percent"].mean()
img_loyalty = plot_model_results(
results_df_loyalty,
average_value_loyalty,
f"Loyalty Drivers: {file_name}",
"Loyalty",
)
        else:
            logger.debug("Loyalty data not present; skipping Loyalty plot.")
# plot consideration
img_consideration = None
results_df_consideration = None
if consideration_present:
# Get n_samples from output text
n_samples_consideration = output_text.split(": Consideration")[1]
n_samples_consideration = n_samples_consideration.split(
"Analysis based on "
)[1]
n_samples_consideration = n_samples_consideration.split("observations")[0]
results_df_consideration = pd.read_csv(csv_output_path_consideration)
results_df_consideration["Importance_percent"] = (
results_df_consideration["Importance"] * 100
)
average_value_consideration = results_df_consideration[
"Importance_percent"
].mean()
img_consideration = plot_model_results(
results_df_consideration,
average_value_consideration,
f"Consideration Drivers: {file_name}",
"Consideration",
)
        else:
            logger.debug("Consideration data not present; skipping Consideration plot.")
# plot satisfaction
img_satisfaction = None
results_df_satisfaction = None
if satisfaction_present:
# Get n_samples from output text
n_samples_satisfaction = output_text.split(": Satisfaction")[1]
n_samples_satisfaction = n_samples_satisfaction.split("Analysis based on ")[
1
]
n_samples_satisfaction = n_samples_satisfaction.split("observations")[0]
results_df_satisfaction = pd.read_csv(csv_output_path_satisfaction)
results_df_satisfaction["Importance_percent"] = (
results_df_satisfaction["Importance"] * 100
)
average_value_satisfaction = results_df_satisfaction[
"Importance_percent"
].mean()
img_satisfaction = plot_model_results(
results_df_satisfaction,
average_value_satisfaction,
f"Satisfaction Drivers: {file_name}",
"Satisfaction",
)
        else:
            logger.debug("Satisfaction data not present; skipping Satisfaction plot.")
# plot trust builder table 1 and 2
df_builder_pivot = None
if trustbuilder_present:
# Create dataframe for trust builder
results_df_builder = pd.read_csv(csv_output_path_trustbuilder)
bucket_colors = {
"Stability": "lightblue",
"Development": "lightgreen",
"Relationship": "lavender",
"Benefit": "lightyellow",
"Vision": "orange",
"Competence": "lightcoral",
}
combined_data = {
"Message": results_df_builder["Message"],
"Stability": results_df_builder["Stability"].round(0).astype(int),
"Development": results_df_builder["Development"].round(0).astype(int),
"Relationship": results_df_builder["Relationship"].round(0).astype(int),
"Benefit": results_df_builder["Benefit"].round(0).astype(int),
"Vision": results_df_builder["Vision"].round(0).astype(int),
"Competence": results_df_builder["Competence"].round(0).astype(int),
}
df_builder = pd.DataFrame(combined_data)
# Prepare lists to collect data
buckets = []
messages = []
percentages = []
bucket_columns = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Iterate through each bucket column
for bucket in bucket_columns:
for index, value in results_df_builder[bucket].items():
if value > 0:
buckets.append(bucket)
messages.append(results_df_builder["Message"][index])
percentages.append(int(round(value)))
# Create the new DataFrame
builder_consolidated = {
"Trust Bucket®": buckets,
"TrustBuilders®": messages,
"%": percentages,
}
df_builder_pivot = pd.DataFrame(builder_consolidated)
# Define the order of the Trust Bucket® categories
trust_driver_order = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Convert Trust Bucket® column to a categorical type with the specified order
df_builder_pivot["Trust Bucket®"] = pd.Categorical(
df_builder_pivot["Trust Bucket®"],
categories=trust_driver_order,
ordered=True,
)
# Sort the DataFrame by 'Trust Bucket®' and '%' in descending order within each 'Trust Bucket®'
df_builder_pivot = df_builder_pivot.sort_values(
by=["Trust Bucket®", "%"], ascending=[True, False]
)
# After processing, ensure to delete the temporary files and directory
os.remove(csv_output_path_trust)
if nps_present:
os.remove(csv_output_path_nps)
if loyalty_present:
os.remove(csv_output_path_loyalty)
if consideration_present:
os.remove(csv_output_path_consideration)
if satisfaction_present:
os.remove(csv_output_path_satisfaction)
if trustbuilder_present:
os.remove(csv_output_path_trustbuilder)
os.remove(text_output_path)
if img_nps is None:
# Load the placeholder image if NPS analysis was not performed
img_nps = Image.open("./images/nps_not_available.png")
img_nps = img_nps.resize((1000, 800), Image.Resampling.LANCZOS)
if img_loyalty is None:
# Load the placeholder image if Loyalty analysis was not performed
img_loyalty = Image.open("./images/loyalty_not_available.png")
img_loyalty = img_loyalty.resize((1000, 800), Image.Resampling.LANCZOS)
if img_consideration is None:
# Load the placeholder image if Consideration analysis was not performed
img_consideration = Image.open("./images/consideration_not_available.png")
img_consideration = img_consideration.resize(
(1000, 800), Image.Resampling.LANCZOS
)
if img_satisfaction is None:
# Load the placeholder image if Satisfaction analysis was not performed
img_satisfaction = Image.open("./images/satisfaction_not_available.png")
img_satisfaction = img_satisfaction.resize(
(1000, 800), Image.Resampling.LANCZOS
)
return (
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
results_df_trust,
results_df_nps,
results_df_loyalty,
results_df_consideration,
results_df_satisfaction,
)
except Exception as e:
logger.error("Error analyzing Excel file: %s", e)
raise
    finally:
        if os.path.exists(temp_dir):
            try:
                shutil.rmtree(temp_dir)  # remove the directory and any leftover temp files
            except Exception as e:
                logger.error("Error removing temporary directory: %s", e)
def highlight_trust_bucket(row):
if "stability of trust buckets" in row["Trust Bucket®"]:
return ['background-color: yellow'] * len(row) # Apply yellow background to the entire row
return [''] * len(row)
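# Styler usage sketch: row-wise style functions like highlight_trust_bucket are
# applied with axis=1 and return one CSS string per cell. The frame below is
# hypothetical and the helper is never called at import time.
def _demo_highlight_styler():
    demo = pd.DataFrame({"Trust Bucket®": ["Stability", "Vision"], "%": [21, 12]})
    return demo.style.apply(highlight_trust_bucket, axis=1)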
def batch_file_processing(file_paths):
"""
Analyzes all Excel files in a list of file paths and generates plots for all models.
Args:
file_paths (List[str]): List of paths to the Excel files.
Returns:
Image: Image of the Trust regression plot.
Image: Image of the NPS regression plot.
Image: Image of the Loyalty regression plot.
Image: Image of the Consideration regression plot.
Image: Image of the Satisfaction regression plot.
str: Summary of the analysis.
"""
img_bucketfull_list = []
img_trust_list = []
img_nps_list = []
img_loyalty_list = []
img_consideration_list = []
img_satisfaction_list = []
df_builder_pivot_list = []
output_text_list = []
for file_path in file_paths:
try:
(
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
results_df_trust,
results_df_nps,
results_df_loyalty,
results_df_consideration,
results_df_satisfaction,
) = analyze_excel_single(file_path)
img_bucketfull_list.append(img_bucketfull)
img_trust_list.append(img_trust)
img_nps_list.append(img_nps)
img_loyalty_list.append(img_loyalty)
img_consideration_list.append(img_consideration)
img_satisfaction_list.append(img_satisfaction)
df_builder_pivot_list.append(df_builder_pivot)
output_text_list.append(output_text)
except Exception as e:
logger.error("Error processing file %s: %s", file_path, e)
return (
img_bucketfull_list,
img_trust_list,
img_nps_list,
img_loyalty_list,
img_consideration_list,
img_satisfaction_list,
df_builder_pivot_list,
output_text_list,
)
def highlight_stability(s):
return [
"background-color: yellow; font-weight: bold;" if "stability" in str(v).lower() else ""
for v in s
]
def add_heading_to_image(image: Image.Image, heading: str, font_size=28):
# Create a heading image
width = image.width
heading_height = font_size + 20
total_height = image.height + heading_height
new_img = Image.new("RGB", (width, total_height), (255, 255, 255))
draw = ImageDraw.Draw(new_img)
try:
font = ImageFont.truetype("arial.ttf", font_size)
    except OSError:
        # Fall back to PIL's default bitmap font if arial.ttf is unavailable
        font = ImageFont.load_default()
draw.text((10, 10), heading, font=font, fill=(0, 0, 0))
new_img.paste(image, (0, heading_height))
return new_img
def combine_two_images_horizontally(img1: Image.Image, heading1: str, img2: Image.Image, heading2: str):
img1 = add_heading_to_image(img1, heading1)
img2 = add_heading_to_image(img2, heading2)
max_height = max(img1.height, img2.height)
total_width = img1.width + img2.width
combined = Image.new("RGB", (total_width, max_height), (255, 255, 255))
combined.paste(img1, (0, 0))
combined.paste(img2, (img1.width, 0))
return combined
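# Sketch combining the two image helpers above on blank canvases: each image
# gets a heading strip, then the pair is pasted side by side. Illustrative only.
def _demo_combined_panel():
    left = Image.new("RGB", (200, 150), (240, 240, 240))
    right = Image.new("RGB", (200, 150), (220, 220, 220))
    return combine_two_images_horizontally(left, "Trust Drivers", right, "NPS Drivers")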
def bold_high_impact_row(row):
    try:
        if float(row["%"]) >= 18:
            return ['font-weight: bold'] * len(row)
    except (KeyError, TypeError, ValueError):
        pass
    return [''] * len(row)
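# Styler sketch for bold_high_impact_row: rows whose "%" value is 18 or higher
# are rendered bold, mirroring how the TrustBuilder table is styled below. The
# frame is hypothetical.
def _demo_bold_styler():
    demo = pd.DataFrame({"TrustBuilders®": ["msg a", "msg b"], "%": [25, 10]})
    return demo.style.apply(bold_high_impact_row, axis=1)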
def variable_outputs(file_inputs):
file_inputs_single = file_inputs
# Call batch file processing and get analysis results
(
img_bucketfull_list,
img_trust_list,
img_nps_list,
img_loyalty_list,
img_consideration_list,
img_satisfaction_list,
df_builder_pivot_list,
output_text_list,
) = batch_file_processing(file_inputs_single)
# Get number of datasets uploaded
k = len(file_inputs_single)
# Container for visible plots
global plots_visible
plots_visible = []
# Use zip_longest to iterate over the lists, padding with None
for row, (
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
) in enumerate(
zip_longest(
img_bucketfull_list,
img_trust_list,
img_nps_list,
img_loyalty_list,
img_consideration_list,
img_satisfaction_list,
df_builder_pivot_list,
output_text_list,
)
):
# Get dataset name
dataset_name = file_inputs_single[row].split("/")[-1]
global plots
# Based on the number of files uploaded, determine the content of each textbox
plots = [
gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'>Trust and NPS Drivers</span>",
visible=True,
),
gr.Markdown(
"""
                The analysis identifies the TrustLogic® dimensions that are most effective in driving your audience's likelihood to recommend and trust you.
""",
visible=True,
),
# ✅ Side-by-side Trust & NPS drivers
gr.Image(
value=combine_two_images_horizontally(img_trust, "Trust Drivers", img_nps, "NPS Drivers"),
type="pil",
label="Trust + NPS Drivers",
visible=True,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Image(
value=None,
type="pil",
visible=False,
),
gr.Textbox(
value=output_text,
visible=False,
),
]
# add current plots to container
plots_visible += plots
if isinstance(df_builder_pivot, pd.DataFrame):
logger.debug(f"df_builder_pivot: {df_builder_pivot}")
markdown_5 = gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'> What to say and do to build your trust and Net Promoter Score </span>",
visible=True,
)
markdown_6 = gr.Markdown(
"<span style='font-size:17px; font-weight:bold;'>You see the most effective attributes for fulfilling your Trust and NPS Drivers — the things you need to say and do to increase recommendation and build trust. Scroll down to use them with our TrustLogicAI.</span>",
#+ "<br>In the table, use the little arrow in each column to toggle the most to least effective TrustBuilders® to fill each Trust Bucket®. Your focus is only on the Trust Bucket® with the highest driver impact. "
# + "<br> Note: Even if Trust Buckets® for Customers and Prospects overlap, the most effective statements are very different. This provides clear guidance for acquisition versus loyalty activities.",
visible=True,
)
styled_df = df_builder_pivot.style.apply(bold_high_impact_row, axis=1)
table_builder_2 = gr.Dataframe(
value=styled_df,
headers=list(df_builder_pivot.columns),
interactive=False,
label=f"{dataset_name}",
visible=True,
height=800,
wrap=True,
)
plots_visible.append(markdown_5)
plots_visible.append(markdown_6)
plots_visible.append(table_builder_2)
else:
plots_visible.append(gr.Markdown("", visible=False))
plots_visible.append(gr.Markdown("", visible=False))
plots_visible.append(gr.Dataframe(value=None, label="", visible=False))
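    # One full set of hidden placeholder components. It is repeated below for
    # every unused dataset slot so that the returned list always contains
    # exactly max_outputs sets, matching the components wired to the event.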
plots_invisible = [
gr.Markdown("", visible=False),
gr.Markdown("", visible=False),
gr.Image(label="Trust Buckets", visible=False),
gr.Markdown("", visible=False),
gr.Markdown("", visible=False),
gr.Image(label="Trust Drivers", visible=False),
gr.Image(label="NPS Drivers", visible=False),
gr.Image(label="Loyalty Drivers", visible=False),
gr.Image(label="Consideration Drivers", visible=False),
gr.Image(label="Satisfaction Drivers", visible=False),
gr.Textbox(label="Analysis Summary", visible=False),
gr.Markdown("", visible=False),
gr.Markdown("", visible=False),
gr.Dataframe(value=None, label=" ", visible=False),
]
return plots_visible + plots_invisible * (max_outputs - k)
def reset_outputs():
# Reset outputs
outputs = []
# Create fixed dummy components
markdown_3 = gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'>Trust and NPS Drivers</span>",
visible=True,
)
markdown_4 = gr.Markdown(
"""
This analysis shows which Trust Buckets® are most effective in building trust and improving your key performance indicators (KPIs).
<br><br>
The middle line is the average importance. The bars extending to the right show which Trust Buckets® are most important. The higher the percentage, the more important the Trust Bucket® is to your audience.
""",
visible=True,
)
trust_plot = gr.Image(value=None, label="Trust Drivers", visible=False)
nps_plot = gr.Image(value=None, label="NPS Drivers", visible=False)
loyalty_plot = gr.Image(value=None, label="Loyalty Drivers", visible=False)
consideration_plot = gr.Image(
value=None, label="Consideration Drivers", visible=False
)
satisfaction_plot = gr.Image(value=None, label="Satisfaction Drivers", visible=False)
summary_text = gr.Textbox(value=None, label="Analysis Summary", visible=False)
markdown_5 = gr.Markdown(
"<span style='font-size:20px; font-weight:bold;'>TrustBuilders®",
visible=True,
)
markdown_6 = gr.Markdown(
"These are the specific reasons to trust and recommend. They can be your brand values, features, attributes, programs, and messages. "
+ "<br>For practical purposes, they tell you exactly what to do and say to build more trust and improve your KPIs. "
+ "<br>In the table, use the little arrows to toggle by Trust Bucket® or Trust Builder® importance. "
+ "<br>Tip: Compare Owners and Prospects. Even though some of the Trust Buckets® are the same, the Trust Builders® are very different.",
visible=True,
)
df_builder_pivot = gr.Dataframe(value=None, label="", visible=True)
outputs.append(markdown_3)
outputs.append(markdown_4)
outputs.append(trust_plot)
outputs.append(nps_plot)
outputs.append(loyalty_plot)
outputs.append(consideration_plot)
outputs.append(satisfaction_plot)
outputs.append(summary_text)
outputs.append(markdown_5)
outputs.append(markdown_6)
outputs.append(df_builder_pivot)
# invisible from second set onwards
for i in range(1, max_outputs):
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Image(value=None, label="", visible=False))
outputs.append(gr.Textbox(value=None, label="", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Markdown("", visible=False))
outputs.append(gr.Dataframe(value=None, label="", visible=False))
return outputs
def data_processing(file_path):
"""
Processes a single CSV file and generates required outputs.
Args:
file_path (str): Path to the CSV file.
Returns:
tuple: Contains processed data and results (customize based on your needs).
"""
try:
logger.info("Processing CSV file: %s", file_path)
# Load the first two rows to get the column names
header_df = pd.read_csv(file_path, header=None, nrows=2)
# Fill NaN values in the rows with an empty string
header_df.iloc[0] = header_df.iloc[0].fillna("")
header_df.iloc[1] = header_df.iloc[1].fillna("")
# Merge the two rows to create column names
merged_columns = header_df.iloc[0] + " " + header_df.iloc[1]
# Load the rest of the DataFrame using the merged column names
df = pd.read_csv(file_path, skiprows=2, names=merged_columns)
# For any value in all columns that contain " - " (rating),
# split and only take the first part (in digit format)
def split_value(val):
if isinstance(val, str) and " - " in val:
return val.split(" - ")[0]
return val
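        # e.g. split_value("4 - Somewhat agree") -> "4"; non-matching values
        # pass through unchanged (the label text is a hypothetical example).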
# Apply the function to all elements of the DataFrame
df = df.applymap(split_value)
# Convert the columns from the third column onwards to numeric
df.iloc[:, 2:] = df.iloc[:, 2:].apply(pd.to_numeric, errors="coerce")
# Search for the text in the column names
search_text = "how likely are you to buy another".lower()
col_index = [
i for i, col in enumerate(df.columns) if search_text in col.lower()
]
if col_index:
col_index = col_index[0] # Assuming there is only one matching column
# Define the mapping dictionary for reverse replacement
replace_map = {1: 5, 2: 4, 4: 2, 5: 1}
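            # e.g. 1 -> 5 and 5 -> 1 (3 stays 3), flipping the 1-5 scale,
            # presumably so higher values consistently mean a better answer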
# Replace values in the specified column
df.iloc[:, col_index] = df.iloc[:, col_index].replace(replace_map)
column_mapping = {
"Did you own a": "Q1",
"your age": "Q2",
"How likely are you to recommend buying a": "NPS",
"level of trust": "Trust",
"buy another": "Loyalty",
"consider buying": "Consideration",
"Has built a strong and stable foundation": "Stability",
"Will develop well in the future": "Development",
"Relates well to people like me": "Relationship",
"Is valuable to our lives": "Benefit",
"Has vision and values I find appealing": "Vision",
"Has what it takes to succeed": "Competence",
}
# Create a list to hold the labels
list_labels = []
# Loop through each column in merged_columns
for col in merged_columns:
label = None
for key, value in column_mapping.items():
if key.lower() in col.lower():
label = value
break
if label:
list_labels.append(label)
# Determine the difference between the lengths of list_labels and merged_columns
difference = len(merged_columns) - len(list_labels)
        # TRUST STATEMENTS: labels TB1 - TB37 populate the remaining columns
# Append the next values ("TB1", "TB2", ...) until list_labels matches the length of merged_columns
for i in range(difference):
list_labels.append(f"TB{i + 1}")
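        # e.g. if 12 columns matched the mapping above in a 49-column file,
        # the remaining 37 are labelled TB1..TB37 (numbers are illustrative)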
# Add list_labels as the first row after the column names
df_labels = pd.DataFrame([list_labels], columns=df.columns)
# Concatenate header_df, df_labels, and df
header_df.columns = df.columns # Ensure header_df has the same columns as df
# Create a DataFrame with 2 rows of NaNs
nan_rows = pd.DataFrame(np.nan, index=range(2), columns=df.columns)
# Pad 2 rows of NaNs, followed by survey questions to make it the same format as the input excel file
df = pd.concat([nan_rows, header_df, df_labels, df]).reset_index(drop=True)
# Make list labels the column names
df.columns = list_labels
# Remove columns beyond TB37
max_tb_label = 37
tb_columns = [col for col in df.columns if col.startswith("TB")]
tb_columns_to_keep = {f"TB{i + 1}" for i in range(max_tb_label)}
tb_columns_to_drop = [
col for col in tb_columns if col not in tb_columns_to_keep
]
df.drop(columns=tb_columns_to_drop, inplace=True)
# Take snippets from df as drivers
kpis = [
"Trust",
"NPS",
"Loyalty",
"Consideration",
"Satisfaction",
]
drivers = [
"Stability",
"Development",
"Relationship",
"Benefit",
"Vision",
"Competence",
]
# Create an empty list to store the selected columns
selected_columns = []
# Check each item in kpis and drivers and search in df.columns
for kpi in kpis:
for col in df.columns:
if pd.notna(col) and kpi.lower() in col.lower():
selected_columns.append(col)
for driver in drivers:
for col in df.columns:
if pd.notna(col) and driver.lower() in col.lower():
selected_columns.append(col)
# Extract the selected columns into a new DataFrame df_drivers
df_drivers = df[selected_columns].iloc[4:].reset_index(drop=True)
# Create a DataFrame with 2 rows of NaNs
nan_rows = pd.DataFrame(np.nan, index=range(2), columns=df_drivers.columns)
            # Pad 2 rows of NaNs to make it the same format as the input Excel file
df_drivers = pd.concat([nan_rows, df_drivers]).reset_index(drop=True)
# Get dataset name
dataset_name = file_path.split("/")[-1]
dataset_name = dataset_name.split(".")[0]
# Create a temporary directory
temp_dir = tempfile.mkdtemp()
logger.info("Created temporary directory for processed file: %s", temp_dir)
# Save processed df as an Excel file in the temporary directory
processed_file_path = os.path.join(temp_dir, f"{dataset_name}.xlsx")
with pd.ExcelWriter(processed_file_path) as writer:
df_drivers.to_excel(writer, sheet_name="Driver", index=False)
df.to_excel(writer, sheet_name="Builder", index=False)
return processed_file_path
except Exception as e:
logger.error("Error processing CSV file: %s", e)
raise
def process_examples(file_name):
file_path = f"example_files/{file_name[0]}"
file_path = [file_path]
outputs = variable_outputs(file_path)
return outputs
def process_datasets(file_inputs):
"""
Processes uploaded datasets and calls appropriate functions based on file type.
Args:
file_inputs (List[UploadFile]): List of uploaded files.
Returns:
List[gr.Blocks]: List of Gradio output components.
"""
outputs_list = []
for file_input in file_inputs:
file_path = file_input.name
file_extension = os.path.splitext(file_path)[-1].lower()
if file_extension == ".xlsx":
outputs_list.append(file_path)
elif file_extension == ".csv":
try:
processed_file_path = data_processing(file_path)
outputs_list.append(processed_file_path)
except Exception as e:
logger.error("Error processing file %s: %s", file_path, e)
outputs = variable_outputs(outputs_list)
return outputs
# Load knowledge base
def load_knowledge_base():
try:
loader = TextLoader("./data_source/time_to_rethink_trust_book.md")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
return docs
except Exception as e:
logger.error(f"Error loading knowledge base: {e}")
raise e
knowledge_base = load_knowledge_base()
# Initialize embeddings and FAISS index
try:
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(knowledge_base, embeddings)
except Exception as e:
logger.error(f"Error initializing FAISS index: {e}")
raise e
# Define search function for knowledge base
def search_knowledge_base(query):
try:
output = db.similarity_search(query)
return output
except Exception as e:
logger.error(f"Error searching knowledge base: {e}")
return ["Error occurred during knowledge base search"]
# Google Search function (via the SerpAPI client; the key is stored in serper_api_key)
def google_search(query):
try:
search_client = serpapi.Client(api_key=serper_api_key)
results = search_client.search(
{
"engine": "google",
"q": query,
}
)
        # Some organic results lack a "snippet" key, so use .get to avoid a KeyError
        snippets = [result.get("snippet", "") for result in results.get("organic_results", [])]
return snippets
except requests.exceptions.HTTPError as http_err:
logger.error(f"HTTP error occurred: {http_err}")
return ["HTTP error occurred during Google search"]
except Exception as e:
logger.error(f"General Error: {e}")
return ["Error occurred during Google search"]
# RAG response function
def rag_response(query):
try:
retrieved_docs = search_knowledge_base(query)
context = "\n".join(doc.page_content for doc in retrieved_docs)
prompt = f"Context:\n{context}\n\nQuestion: {query}\nAnswer:"
llm = ChatOpenAI(model="gpt-4o", temperature=0.5, api_key=openai_api_key)
response = llm.invoke(prompt)
return response.content
except Exception as e:
logger.error(f"Error generating RAG response: {e}")
return "Error occurred during RAG response generation"
def compute_dataframe_proof_point():
global selected_dataset_ai
global df_builder_pivot_str
try:
# Load the selected dataset
dataset_file_path = f"example_files/{selected_dataset_ai}"
(
img_bucketfull,
img_trust,
img_nps,
img_loyalty,
img_consideration,
img_satisfaction,
df_builder_pivot,
output_text,
results_df_trust,
results_df_nps,
results_df_loyalty,
results_df_consideration,
results_df_satisfaction,
) = analyze_excel_single(dataset_file_path)
if df_builder_pivot is not None:
qualified_bucket_names_list = []
# Remove buckets with values below 18%
            if results_df_trust is not None:
                # Match the None-guard used for the other KPI result frames
                qualified_bucket_names_trust = results_df_trust[
                    results_df_trust["Importance_percent"] >= 18
                ]["Predictor"].tolist()
                qualified_bucket_names_list.append(qualified_bucket_names_trust)
if results_df_nps is not None:
qualified_bucket_names_nps = results_df_nps[
results_df_nps["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_nps)
if results_df_loyalty is not None:
qualified_bucket_names_loyalty = results_df_loyalty[
results_df_loyalty["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_loyalty)
if results_df_consideration is not None:
qualified_bucket_names_consideration = results_df_consideration[
results_df_consideration["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_consideration)
if results_df_satisfaction is not None:
qualified_bucket_names_satisfaction = results_df_satisfaction[
results_df_satisfaction["Importance_percent"] >= 18
]["Predictor"].tolist()
qualified_bucket_names_list.append(qualified_bucket_names_satisfaction)
# Flatten the list of lists and convert to a set to remove duplicates
qualified_bucket_names_flat = [
item for sublist in qualified_bucket_names_list for item in sublist
]
qualified_bucket_names_unique = list(set(qualified_bucket_names_flat))
            # Filter df_builder_pivot to statements whose "Trust Bucket®" is in qualified_bucket_names_unique
df_builder_pivot = df_builder_pivot[
df_builder_pivot["Trust Bucket®"].isin(qualified_bucket_names_unique)
]
# Remove statements with values below 18%
df_builder_pivot = df_builder_pivot[df_builder_pivot["%"] >= 18]
df_builder_pivot_str = df_builder_pivot.to_string(index=False)
else:
df_builder_pivot_str = "Trust Builder information is not available."
except FileNotFoundError:
df_builder_pivot_str = "Dataset not found."
except Exception as e:
df_builder_pivot_str = f"An error occurred during analysis: {e}"
return df_builder_pivot_str
# Define tools using LangChain's `tool` decorator
@tool
def knowledge_base_tool(query: str):
"""
Tool function to query the knowledge base and retrieve a response.
Args:
query (str): The query to search the knowledge base.
Returns:
str: The response retrieved from the knowledge base.
"""
return rag_response(query)
@tool
def google_search_tool(query: str):
"""
Tool function to perform a Google search using the SERPER API.
Args:
query (str): The query to search on Google.
Returns:
list: List of snippets extracted from search results.
"""
return google_search(query)
@tool
def compute_dataframe_proof_point_tool() -> str:
"""
Tool function to compute DATAFRAME_PROOF_POINT.
Returns:
str: The computed DATAFRAME_PROOF_POINT as a string.
"""
return compute_dataframe_proof_point()
tavily_tool = TavilySearchResults(
max_results=5,
search_depth="advanced",
topic="news",
days=1,
include_answer=True,
include_raw_content=True,
# include_domains=[...],
exclude_domains=['example.com'],
# name="...", # overwrite default tool name
# description="...", # overwrite default tool description
# args_schema=..., # overwrite default args_schema: BaseModel
)
# compile all tools as a list
tools = [
knowledge_base_tool,
tavily_tool,
#compute_dataframe_proof_point_tool,
]
def validate_ai_output(ai_output, proof_points):
"""
Validates that the AI output includes all relevant Trust Buckets and Builders.
Args:
ai_output: The generated response from the AI.
proof_points: The DATAFRAME_PROOF_POINT dictionary with Trust Buckets and Builders.
Returns:
Validated and corrected output.
"""
validated_output = ai_output
missing_buckets = []
# Check if all relevant buckets are included
for bucket, builders in proof_points.items():
if bucket not in ai_output:
missing_buckets.append(bucket)
# Add missing buckets and builders if any
if missing_buckets:
corrections = []
for bucket in missing_buckets:
corrections.append(f"**{bucket}**")
for builder in proof_points[bucket]:
corrections.append(f"- {builder['Trust Builder']} [{builder['Percentage']}%]")
validated_output = f"{validated_output}\n\nMissing Data:\n" + "\n".join(corrections)
return validated_output
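# Expected proof_points shape, inferred from the loop above (values are
# hypothetical):
#   {"Stability": [{"Trust Builder": "We work with our unions ...",
#                   "Percentage": 21}, ...], ...}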
# Create the prompt template
prompt_message = """
**Role**
You are an expert copywriter specializing in creating high-quality marketing content that integrates the top-scoring statements for each Trust Bucket into various formats. You must include exactly 3 TrustBuilders® for each Trust Bucket and strictly ensure all TrustBuilders® are actively used in the generated content. Make the content longer, especially sales conversations, which must use a minimum of 9 TrustBuilders®.
*Strictly use Google search for finding features.*
Mention the optional Beats audio system upgrade.
**Listing Top-Scoring Statements**
- Use the following format to display top-scoring statements:
```
Top-scoring statements
**Bucket Name**
- TrustBuilder® Statement 1 [Percentage]
- TrustBuilder® Statement 2 [Percentage]
- TrustBuilder® Statement 3 [Percentage]
```
**Generating Content**
- Integrate **all listed TrustBuilders®** into the requested content format. A strict minimum of 9 TrustBuilders® must be used to make the content longer.
- Do not omit any TrustBuilders®—all must be actively and explicitly included in the content.
- Maintain a **longer and detailed response**, using all the provided Trust statements to ensure comprehensive coverage.
- Use Google search as well.
---
**Strict Requirements**
1. **Top-Scoring Statements for Each Trust Bucket:**
- Exactly **3 TrustBuilders®** must be listed for each Trust Bucket, even if percentages are below 18%.
- Ensure consistency in the format.
2. **Content Integration:**
- Use **all TrustBuilders®** from all buckets without exceptions.
- Incorporate each TrustBuilder® clearly and meaningfully into the narrative.
---
**Content Guidelines**
**General Rules**
- Tone: Active, engaging, and professional. Avoid flowery or overly complex language.
- Specificity: Include relevant names, numbers (e.g., dollars, years), programs, awards, strategies, or locations.
---
**Content Types**
1. **Annual Reports/Articles/Blogs**
- Intro line: "Here is a draft of your blog. Feel free to suggest further refinements."
- Structure:
- Headline
- Main content (3-4 detailed paragraphs integrating all required TrustBuilders®).
- Additional Sections:
- List of TrustBuilders® Used: list a minimum of 9 of the top-scoring statements retrieved.
- Heuristics Used: List 3-5 relevant heuristics.
- Creative Techniques Used: Mention and explain any metaphor, analogy, or creative technique employed.
2. **Sales Conversations/Ad Copy**
- Strictly use Google search.
- Structure:
- A detailed conversation.
- Intro line: "Here is a draft of your [Sales Conversation/Ad Copy]. Feel free to suggest further refinements."
- Content structured around all top-scoring statements retrieved, with clear messaging integrating all required TrustBuilders®.
- Additional Sections:
- List of TrustBuilders® Used: mention a minimum of 9 top-scoring statements.
- Heuristics Used: List 3-5 relevant heuristics.
- Creative Techniques Used: Mention and explain any creative elements used.
3. **Emails, Newsletters, Direct Marketing Letters**
- Intro Line: "Here is a draft of your [Email/Newsletter/Letter]. Feel free to suggest further refinements."
- Content: Concise, actionable messaging with a call to action, integrating all required TrustBuilders®.
- Additional Sections:
- List of TrustBuilders® Used: mention a minimum of 9 top-scoring statements.
- Heuristics Used: List 3-5 relevant heuristics.
- Creative Techniques Used: Highlight creative approaches used.
#### **GENERAL QUERIES**
- For blogs or reports, refer to the knowledge base first. Focus on overall flow and structure without mentioning trust metrics unless requested.
"""
prompt_template = ChatPromptTemplate.from_messages(
[
("system", prompt_message),
MessagesPlaceholder(variable_name="chat_history"),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
# Create Langchain Agent with specific model and temperature
try:
llm = ChatOpenAI(model="gpt-4o", temperature=0.5)
llm_with_tools = llm.bind_tools(tools)
except Exception as e:
logger.error(f"Error creating Langchain Agent: {e}")
# Define the agent pipeline to handle the conversation flow
try:
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_tool_messages(
x["intermediate_steps"]
),
"chat_history": lambda x: x["chat_history"],
}
| prompt_template
| llm_with_tools
| OpenAIToolsAgentOutputParser()
)
# Instantiate an AgentExecutor to execute the defined agent pipeline
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
except Exception as e:
logger.error(f"Error defining agent pipeline: {e}")
# Initialize chat history
chat_history = []
trust_tips = [
"What I don’t know I can’t trust you for. Make sure you know all your great TrustBuilders® and use them over time.",
"The more specific, the more trustworthy each TrustBuilder® is.",
"For TrustBuilders®, think about each Trust Bucket® and in each one organization, product, and key individuals.",
"You are infinitely trustworthy. Organization, products, and your people. In each Trust Bucket® and past, present, and future.",
"Some TrustBuilders® are enduring (we have over 3 million clients), others changing (we are ranked No. 1 for 8 years/9 years), and yet others short-lived (we will present at XYZ conference next month).",
"Not all Trust Buckets® are equally important all the time. Think about which ones are most important right now and how to fill them (with TrustAnalyser® you know).",
"In social media, structure posts over time to focus on different Trust Buckets® and themes within them.",
"Try focusing your idea on specific Trust Buckets® or a mix of them.",
"Within each Trust Bucket®, ask for examples across different themes like employee programs, IT, R&D.",
"To create more and different trust, ask trustifier.ai to combine seemingly unconnected aspects like 'I played in bands all my youth. What does this add to my competence as a lawyer?'",
"With every little bit more trust, your opportunity doubles. It's about using trustifier.ai to help you nudge trust up ever so slightly in everything you do.",
"Being honest is not enough. You can be honest with one aspect and destroy trust and build a lot of trust with another. Define what that is.",
"The more I trust you, the more likely I am to recommend you. And that's much easier with specifics.",
"What others don’t say they are not trusted for - but you can claim that trust.",
"Building more trust is a service to your audience. It's so valuable to us, as humans, that we reflect that value right away in our behaviors.",
"In your audience journey, you can use TrustAnalyser® to know precisely which Trust Buckets® and TrustBuilders® are most effective at each stage of the journey.",
"Try structuring a document. Like % use of each Trust Bucket® and different orders in the document.",
"In longer documents like proposals, think about the chapter structure and which Trust Buckets® and TrustBuilders® you want to focus on when.",
"Building Trust doesn’t take a long time. Trust is built and destroyed every second, with every word, action, and impression. That's why it's so important to build more trust all the time.",
"There is no prize for the second most trusted. To get the most business, support, and recognition, you have to be the most trusted.",
"With most clients, we know they don’t know 90% of their available TrustBuilders®. Knowing them increases internal trust - and that can be carried to the outside.",
"Our client data always shows that, after price, trust is the key decision factor (and price is a part of benefit and relationship trust).",
"Our client data shows that customer value increases 9x times from Trust Neutral to High Trust. A good reason for internal discussions.",
"Our client's data shows that high trust customers are consistently far more valuable than just trusting ones.",
"Trust determines up to 85% of your NPS. No wonder, because the more I trust you, the more likely I am to recommend you.",
"Trust determines up to 75% of your loyalty. Think about it yourself. It's intuitive.",
"Trust determines up to 87% of your reputation. Effectively, they are one and the same.",
"Trust determines up to 85% of your employee engagement. But what is it that they want to trust you for?",
"Don't just ask 'what your audience needs to trust for'. That just keeps you at low, hygiene trust levels. Ask what they 'would love to trust for'. That's what gets you to High Trust."
]
suggestions = [
"Try digging deeper into a specific TrustBuilder®.",
"Ask just for organization, product, or a person's TrustBuilders® for a specific Trust Bucket®.",
"Some TrustBuilders® can fill more than one Trust Bucket®. We call these PowerBuilders. TrustAnalyser® reveals them for you.",
"Building trust is storytelling. trustifier.ai connects Trust Buckets® and TrustBuilders® for you. But you can push it more to connect specific Trust Buckets® and TrustBuilders®.",
"Describe your audience and ask trustifier.ai to choose the most relevant Trust Buckets®, TrustBuilders®, and tonality (TrustAnalyser® can do this precisely for you).",
"Ask trustifier.ai to find TrustBuilders® for yourself. Then correct and add a few for your focus Trust Buckets® - and generate a profile or CV.",
"LinkedIn Profiles are at their most powerful if they are regularly updated and focused on your objectives. Rewrite it every 2-3 months using different Trust Buckets®.",
"Share more of your TrustBuilders® with others and get them to help you build your trust.",
"Build a trust strategy. Ask trustifier.ai to find all your TrustBuilders® in the Trust Buckets® and then create a trust-building program for a specific person/audience over 8 weeks focusing on different Trust Buckets® that build on one another over time. Then refine and develop by channel ideas.",
"Brief your own TrustBuilders® and ask trustifier.ai to tell you which Trust Buckets® they're likely to fill (some can fill more than one).",
"Have some fun. Ask trustifier.ai to write a 200-word speech to investors using all Trust Buckets®, but leading and ending with Development Trust. Use [BRAND], product, and personal CEO [NAME] TrustBuilders®.",
"Ask why TrustLogic® can be trusted in each Trust Bucket®.",
"Ask what's behind TrustLogic®."
]
def get_trust_tip_and_suggestion():
trust_tip = random.choice(trust_tips)
suggestion = random.choice(suggestions)
return trust_tip, suggestion
def get_top_scoring_statements(dataset):
"""
Retrieve top-scoring statements for specific trust buckets based on dataset
Args:
dataset (str): The selected dataset ('VW Owners' or 'VW Prospects')
Returns:
str: Formatted top-scoring statements
"""
if dataset == "VW Owners.xlsx":
# Top statements for VW Owners
top_statements = """
Top Scoring Statements:
*Development*
- We bring together the world's best talent in many disciplines to create your cars.(25%)
- Building great and affordable cars is our foundation.(22%)
- Our beginnings are a unique combination of investors and unions, and today 9 of our 20 board members are staff representatives.(18%)
*Benefit*
- We bring together the world's best talent in many disciplines to create your cars.(23%)
- We strongly focus on keeping and nurturing our team and have a 99.5% retention rate.(18%)
- Employees are provided with extensive continuous training.(16%)
*Vision*
- Our brands are ranked No. 2 and 5 in the reliability rankings.(27%)
- Our technology and manufacturing capabilities are second to none.(22%)
- We produce almost 9 million cars per year.(15%)
"""
elif dataset == "Volkswagen Non Customers.xlsx":
# Top statements for VW Prospects
top_statements = """
Top Scoring Statements:
*Stability*
- We work with our unions in our restructuring and future plans.(21%)
- We have learned from our mistakes in the Diesel Affair and made fundamental changes.(19%)
- Building great and affordable cars is our foundation.(18%)
*Relationship*
- We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (24%)
- We are at the forefront of technology to deliver better cars and driving experiences. (17%)
- Our beginnings are a unique combination of investors and unions, and today 9 of our 20 board members are staff representatives. (17%)
*Competence*
- At the heart of our decision-making is the long-term quality of life for all of us. (20%)
- We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (19%)
- We are one of the longest-established car companies. (18%)
"""
    else:
        # Guard against an UnboundLocalError when an unknown dataset is passed
        top_statements = "No top-scoring statements are available for this dataset."
    return top_statements
last_top_scoring_statements = None
def chatbot_response(message, history, selected_dataset):
"""
    Generate a chatbot response from the selected dataset and user input while maintaining the chat history.
"""
global last_top_scoring_statements
try:
if not selected_dataset:
return [("Error", "❌ No dataset selected. Please select one and try again.")], history
# Define datasets and corresponding trust buckets
datasets = {
"VW Owners.xlsx": {
"Development": [
"We bring together the world's best talent in many disciplines to create your cars. (25%)",
"Building great and affordable cars is our foundation. (22%)",
"Our beginnings are a unique combination of investors and unions. (18%)",
],
"Benefit": [
"We bring together the world's best talent in many disciplines to create your cars. (23%)",
"We strongly focus on keeping and nurturing our team and have a 99.5% retention rate. (18%)",
"Employees are provided with extensive continuous training. (16%)",
],
"Vision": [
"Our brands are ranked No. 2 and 5 in the reliability rankings. (27%)",
"Our technology and manufacturing capabilities are second to none. (22%)",
"We produce almost 9 million cars per year. (15%)",
],
},
"Volkswagen Non Customers.xlsx": {
"Stability": [
"We work with our unions in our restructuring and future plans. (21%)",
"We have learned from our mistakes in the Diesel Affair and we have made fundamental changes. (19%)",
"Building great and affordable cars is our foundation. (18%)",
],
"Relationship": [
"We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (24%)",
"We are at the forefront of technology to deliver better cars and driving experiences. (17%)",
"Our beginnings are a unique combination of investors and unions and today 9 of our 20 board members are staff representatives. (17%)",
],
"Competence": [
"At the heart of our decision-making is the long-term quality of life for all of us. (20%)",
"We put a lot of emphasis on the interior experience and two of our cars have been ranked in the top 10. (19%)",
"We are one of the longest-established car companies. (18%)",
],
},
}
if selected_dataset not in datasets:
return [("Error", f"Invalid dataset: {selected_dataset}")], history
# Build top-scoring statements block
trust_data = datasets[selected_dataset]
top_scoring_statements = "### Top Scoring Statements ###\n\n"
for bucket, statements in trust_data.items():
top_scoring_statements += f"**{bucket}**:\n"
for statement in statements:
top_scoring_statements += f"- {statement}\n"
top_scoring_statements += "\n"
last_top_scoring_statements = top_scoring_statements
# Build prompt
combined_prompt = "\n\n### Top-Scoring Statements for Integration ###\n" + top_scoring_statements
combined_prompt += "\n\nUser Input:\n" + message
trust_tip, suggestion = get_trust_tip_and_suggestion()
trust_tip_and_suggestion = f"\n\n---\n\n**Trust Tip**: {trust_tip}\n\n**Suggestion**: {suggestion}"
# Use existing history in prompt
for entry in history:
combined_prompt += f"\n{entry['role']}: {entry['content']}"
# Structured input to agent
structured_input = {
"input": combined_prompt,
"chat_history": history,
}
# Get agent response
agent_output = agent_executor.invoke(structured_input)
# Build full AI reply
full_response = f"**Selected Dataset: {selected_dataset}**\n\n"
full_response += top_scoring_statements
full_response += f"\n{agent_output['output']}"
full_response += trust_tip_and_suggestion
        # Update history; store the fully assembled reply (dataset name, top
        # statements, agent output, and tip) so it is what the chat displays
        updated_history = history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": full_response},
        ]
# Reconstruct chatbot message list
chatbot_pairs = []
for i in range(0, len(updated_history) - 1, 2):
if i + 1 < len(updated_history):
chatbot_pairs.append((updated_history[i]["content"], updated_history[i + 1]["content"]))
return chatbot_pairs, updated_history
except Exception as e:
logger.error(f"Unexpected error: {e}")
return [("Error", "❌ Something went wrong. Please try again.")], history
def read_ai_dataset_selection():
global selected_dataset_ai
return selected_dataset_ai
import matplotlib.pyplot as plt
import numpy as np
import io
import base64
def generate_trust_score_image(score):
max_score = 10
values = [score, max_score - score]
# Match R² chart size
fig, ax = plt.subplots(figsize=(3.6, 3.6), subplot_kw=dict(aspect="equal"))
# Donut chart
wedges, _ = ax.pie(
values,
startangle=90,
counterclock=False,
colors=["#4CAF50", "#C0C0C0"],
wedgeprops=dict(width=0.35)
)
# Center Score
ax.text(0, 0, f"{score}", ha='center', va='center', fontsize=19, fontweight='bold')
# Radial labels (adjusted for perfect alignment)
labels = ["9–10: High Trust", "0–4: Low Trust", "7–8: Trust", "5–6: Trust Neutral"]
angles = [135, 45, 225, 315] # TL, TR, BL, BR
radius = 1.5
for label, angle in zip(labels, angles):
x = radius * np.cos(np.deg2rad(angle))
y = radius * np.sin(np.deg2rad(angle))
        # Nudge right-side labels (45°, 315°) up slightly and left-side labels down
if angle < 90 or angle > 270:
y += 0.02
else:
y -= 0.08
# Extra downward nudge for right side to align with left
if angle == 45 or angle == 315:
y -= 0.05
ax.text(x, y, label, ha='center', va='center', fontsize=9, color='black')
# Clean layout
ax.axis('off')
fig.patch.set_alpha(0.0)
ax.patch.set_alpha(0.0)
# Save to buffer
buf = io.BytesIO()
plt.savefig(buf, format='png', transparent=True, dpi=200)
plt.close(fig)
buf.seek(0)
img_base64 = base64.b64encode(buf.read()).decode("utf-8")
# Return HTML with title + image
return f"""
<div style='display: flex; flex-direction: column; align-items: center;'>
<h3 style='text-align:center; margin-bottom:6px;'>Trust Score</h3>
<img src='data:image/png;base64,{img_base64}' style='max-width: 240px; height: auto;'/>
</div>
"""
# Create fixed score variants
def display_trust_score_1():
return generate_trust_score_image(7.9)
def display_trust_score_2():
return generate_trust_score_image(6.8)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
import base64
def reset_variables():
"""
Reset global variables to handle dataset changes.
"""
global selected_dataset_ai
selected_dataset_ai = None
def update_ai_dataset_selection(selection):
global selected_dataset_ai # Ensure the global variable is updated
reset_variables()
chat_history.clear()
if selection == "VW Owners":
selected_dataset_ai = vw_customers_state.value[0] # Use the customer dataset
elif selection == "VW Prospects":
selected_dataset_ai = vw_prospects_state.value[0] # Use the prospects dataset
return selected_dataset_ai
placeholder_text = """"""
predefined_prompt = """
What: Car showroom sales conversation between a prospective buyer of a new T-Roc and our VW advisor.
Who: The visitor is a 24-year-old female, stylishly dressed in brand items.
Topics:
1. Future car usage.
2. Current car and experience with it.
Specifics:
- Highlight T-Roc features that connect with her interests. Find the feature on the T-Roc US website.
- Discuss petrol and electric engine types.
- Focus on aesthetics, exterior design and strong interior features/experience.
Proof Points and Feature Usage:
- Connect features contextually and creatively.
- Be specific with the features and examples, including feature names, numbers, brands, facts, and their implications for the driving and ownership experience.
Style:
- End responses with a question or suggestion to steer to the next topic.
- Convey TrustBuilders® naturally.
"""
js_func = """
function refresh() {
// Force light theme if not already
const url = new URL(window.location);
if (url.searchParams.get('__theme') !== 'light') {
url.searchParams.set('__theme', 'light');
window.location.href = url.href;
return;
}
// Highlight toggle
const btn1 = document.querySelector("#vw_customers_btn");
const btn2 = document.querySelector("#vw_prospects_btn");
if (btn1 && btn2) {
btn1.classList.add("active-btn"); // Default on load
btn1.addEventListener("click", () => {
btn1.classList.add("active-btn");
btn2.classList.remove("active-btn");
});
btn2.addEventListener("click", () => {
btn2.classList.add("active-btn");
btn1.classList.remove("active-btn");
});
}
}
"""
css = """
#vw_customers_btn, #vw_prospects_btn {
border: 2px solid #ccc;
padding: 10px 20px;
font-size: 16px;
font-weight: 500;
border-radius: 6px;
color: #333;
background-color: #fff;
margin: 0 5px;
transition: all 0.25s ease;
min-width: 140px;
text-align: center;
}
/* Highlighted (clicked) button — with teal text */
.active-btn {
background-color: #e0f7f5 !important; /* Light teal background */
color: teal !important; /* Teal text */
border-color: teal !important;
box-shadow: 0 0 6px rgba(0, 128, 128, 0.3);
}
/* Optional hover effect */
#vw_customers_btn:hover, #vw_prospects_btn:hover {
background-color: #f9f9f9;
}
.gr-button:has(svg) {
display: none !important;
}
.gr-image-label {
display: none !important;
}
#chat_container {
max-height: 900px;
overflow-y: auto;
margin-top: 0 !important;
margin-bottom: 0 !important;
padding-bottom: 0 !important;
}
"""
def highlight_button(button_name):
    # gr.update works across Gradio versions; gr.Button.update was removed in Gradio 4
    return gr.update(variant="primary")
# 🧠 Chatbot backend
def vwload_nps_and_r2(file_path):
nps_img = calculate_nps_image_from_excel(file_path)
r2_img = vwcalculate_r2_image_from_excel(file_path)
    return nps_img, r2_img
def load_nps_and_r2(file_path):
nps_img = calculate_nps_image_from_excel(file_path)
r2_img = calculate_r2_image_from_excel(file_path)
return nps_img, r2_img
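# The "vw" variant presumably differs only in the R² chart renderer
# (vwcalculate_r2_image_from_excel vs calculate_r2_image_from_excel), both
# defined earlier in this file.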
with gr.Blocks(css=css, js=js_func) as demo:
gr.HTML("""
<style>
#trust_driver_img, #nps_driver_img {
display: inline-block;
width: 49%;
margin-right: 1%;
vertical-align: top;
}
</style>
""")
# Title and intro
with gr.Column():
gr.Markdown("""
<h2 style="text-align: center; font-size: 2.25rem; font-weight: 600;">
What drives your NPS and trust?
</h2>
""")
gr.Markdown("Quickly identify what drives your NPS and trust across different segments using the automated analyser.")
gr.Markdown("""
<span style="font-size:15px;">Volkswagen Example</span><br>
As a default, the analysis displays <strong>Volkswagen Owner</strong> results.
To trigger the analysis for <strong>Prospects</strong>, toggle to ‘VW Prospects’.
""")
with gr.Column():
with gr.Row():
vw_customers_btn = gr.Button("VW Owners", elem_id="vw_customers_btn")
vw_prospects_btn = gr.Button("VW Prospects", elem_id="vw_prospects_btn")
with gr.Row(equal_height=True):
with gr.Column(scale=1):
nps_img_output = gr.HTML()
with gr.Column(scale=1):
trust_score_output = gr.HTML()
with gr.Column(scale=1):
gr.Markdown("""<div style='text-align: center;'><h3>How much of your NPS is determined by TrustLogic®</h3></div>""")
trust_r2_img = gr.HTML()
with gr.Column():
outputs = reset_outputs()
# 🔁 States
vw_customers_state11 = gr.State(value=["example_files/VW Owners.xlsx"])
vw_prospects_state12 = gr.State(value=["example_files/Volkswagen Non Customers.xlsx"])
vw_customers_state = gr.State(value=["VW Owners.xlsx"])
vw_prospects_state = gr.State(value=["Volkswagen Non Customers.xlsx"])
selected_dataset_ai = gr.State(value="VW Owners.xlsx") # ✅ Matches dictionary key
chat_history = gr.State(value=[])
# 🧠 Chat section
with gr.Column(elem_id="chat_container"):
gr.Markdown("### Test-drive the results in the TrustLogicAI")
gr.Markdown("Our AI uses the analysis results to generate trust-optimised content.")
prompt_textbox = gr.Textbox(value=predefined_prompt, scale=4, label="Insert your prompt", visible=True)
submit_button = gr.Button("Submit")
bot = gr.Chatbot(placeholder=placeholder_text)
submit_button.click(
fn=chatbot_response,
inputs=[prompt_textbox, chat_history, selected_dataset_ai],
outputs=[bot, chat_history]
)
## All widget functions here ##
vw_customers_btn.click(
fn=display_trust_score_1,
inputs=[],
outputs=trust_score_output,
)
vw_prospects_btn.click(
fn=display_trust_score_2,
inputs=[],
outputs=trust_score_output,
)
vw_customers_btn.click(
fn=process_examples,
inputs=[vw_customers_state],
        outputs=outputs,
)
vw_prospects_btn.click(
fn=process_examples,
inputs=[vw_prospects_state],
        outputs=outputs,
)
def set_vw_owners():
return "VW Owners.xlsx"
def set_vw_prospects():
return "Volkswagen Non Customers.xlsx"
vw_customers_btn.click(
fn=set_vw_owners,
inputs=[],
outputs=selected_dataset_ai
)
vw_prospects_btn.click(
fn=set_vw_prospects,
inputs=[],
outputs=selected_dataset_ai
)
vw_customers_btn.click(
fn=lambda f: vwload_nps_and_r2(f[0]),
inputs=[vw_customers_state11],
outputs=[nps_img_output, trust_r2_img],
)
vw_prospects_btn.click(
fn=lambda f: load_nps_and_r2(f[0]),
inputs=[vw_prospects_state12],
outputs=[nps_img_output, trust_r2_img],
)
demo.load(
fn=lambda f: vwload_nps_and_r2(f[0]),
inputs=[vw_customers_state11],
outputs=[nps_img_output, trust_r2_img],
)
demo.load(
fn=display_trust_score_1,
inputs=[],
outputs=trust_score_output
)
demo.load(
fn=process_examples,
inputs=[vw_customers_state],
outputs=outputs
)
try:
demo.launch(server_name="0.0.0.0")
except Exception as e:
logger.error(f"Error launching Gradio app: {e}")
raise e |